diff --git "a/471.jsonl" "b/471.jsonl"
new file mode 100644
--- /dev/null
+++ "b/471.jsonl"
@@ -0,0 +1,643 @@
+{"seq_id":"449677066","text":"from canvas_sdk.methods import courses, external_tools, modules, assignments\nfrom canvas_sdk import RequestContext\nimport json\nimport collections\n\n# Instructor should provide the following information\noauth_token = '<your-canvas-api-token>'\ncanvas_url = 'https://canvas.instructure.com/api'\ncourse_code = \"OpenDSA\"\n\n\n# init the request context\nrequest_ctx = RequestContext(oauth_token, canvas_url)\n\n# get course_id\nresults = courses.list_your_courses(request_ctx, 'total_scores')\nfor i, course in enumerate(results.json()):\n    if course.get(\"course_code\") == course_code:\n        course_id = course.get(\"id\")\n\n# Instructor should provide external tool info\nexternal_tool_name = \"ltitest\"\nprivacy_level = \"public\"\nconsumer_key = \"test\"\nshared_secret = \"secret\"\nconfig_type = \"by_url\"\nconfig_url = \"https://127.0.0.1:9443/tool_config.xml\"\n\n# configure the course external_tool\n# results = external_tools.create_external_tool_courses(\n#     request_ctx, course_id, external_tool_name, privacy_level=privacy_level,\n#     consumer_key=consumer_key, shared_secret=shared_secret,\n#     config_type=config_type, config_url=config_url)\n\n\nwith open('CS3114.json') as data_file:\n    config_data = json.load(\n        data_file, object_pairs_hook=collections.OrderedDict)\n\n# update the course name\ncourse_name = config_data.get(\"title\")\nresults = courses.update_course(\n    request_ctx, course_id, course_name=course_name)\n\nchapters = config_data.get(\"chapters\")\n\nfor chapter in chapters:\n    chapter_obj = chapters[str(chapter)]\n    # OpenDSA chapters will map to canvas modules\n    results = modules.create_module(\n        request_ctx, course_id, str(chapter) + \" Chapter\")\n    module_id = results.json().get(\"id\")\n    for module in chapter_obj:\n        module_obj = chapter_obj[str(module)]\n        module_name = module_obj.get(\"long_name\")\n        # OpenDSA module header will map to canvas text header\n        results = modules.create_module_item(\n            request_ctx, course_id, module_id, 'SubHeader',\n            module_item_content_id=None,\n            module_item_title=module_name + \" Module\",\n            module_item_indent=0)\n        item_id = results.json().get(\"id\")\n        exercises = module_obj.get(\"exercises\")\n        if bool(exercises):\n            exercise_counter = 1\n            for exercise in exercises:\n                exercise_obj = exercises[str(exercise)]\n                long_name = exercise_obj.get(\"long_name\")\n                points = exercise_obj.get(\"points\", 0)\n                if long_name:\n                    print(str(exercise_counter).zfill(2) + \" \" + long_name)\n                    # OpenDSA exercises will map to canvas assignments\n                    results = assignments.create_assignment(\n                        request_ctx, course_id,\n                        long_name,\n                        assignment_submission_types=\"external_tool\",\n                        assignment_external_tool_tag_attributes={\n                            \"url\": \"https://127.0.0.1:9443/lti_tool?problem_type=module&problem_url=CS3114/html/&short_name=\" + module_name + \"-\" + str(exercise_counter).zfill(2)},\n                        assignment_points_possible=points,\n                        assignment_description=long_name)\n                    assignment_id = results.json().get(\"id\")\n\n                    # add assignment to module\n                    results = modules.create_module_item(\n                        request_ctx, course_id, module_id,\n                        'Assignment', module_item_content_id=assignment_id,\n                        module_item_indent=1)\n                    exercise_counter += 1\n        else:\n            results = modules.create_module_item(\n                request_ctx, course_id, module_id,\n                'ExternalTool',\n                module_item_external_url=\"https://127.0.0.1:9443/lti_tool?problem_type=module&problem_url=CS3114/html/&short_name=\" +\n                module_name + \"-00\",\n                module_item_content_id=None,\n                module_item_title=module_name + \" Module\",\n                module_item_indent=1)\n\n    # publish the module\n    results = modules.update_module(\n        request_ctx, course_id, module_id, module_published=True)\n","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"171967105","text":"#!/usr/bin/env python\nimport os\nimport re\nfrom ciutils.cmdutils import CMDExecutor, CMDExecutorError\nfrom cierrors import CIBasicError\nfrom ciutils.fileutils import FileManager\n\nclass CIBuild:\n    BUIDINFO_FILENAME = \"build-info.properties\"\n\n    def __init__(self, workDir, prodVersion, buildName, gitRemote=\"origin\"):\n        self._workDir = workDir\n        self._prodVersion = prodVersion\n        self._buildName = buildName\n        self._gitRemote = gitRemote\n    \n    def _saveBuildInfo(self, buildInfo):\n        content = \"\"\n        filepath = self._workDir + os.sep + self.BUIDINFO_FILENAME\n        try:\n            for key in buildInfo.keys():\n                content = content + key + \"=\" + buildInfo[key] + \"\\n\"\n\n            content = content.rstrip(\"\\n\")\n            FileManager.saveTextFile(filepath, content)\n        except Exception as err:\n            raise CIBuildError(\"Failed on method _saveBuildInfo!\", err)\n\n    def prebuild(self, saveBuildInfo=False):\n        try:\n            nextBN = self.getNextBuildNumber()\n            currentCommit = self.getCurrentCommit()\n            buildVersion = self._prodVersion + \"_b\" + str(nextBN)\n            buildLabel = buildVersion\n            self.createLabel(buildLabel, currentCommit)\n            if(saveBuildInfo):\n                buildInfo = {}\n                buildInfo['build.name'] = self._buildName\n                buildInfo['build.number'] = str(nextBN)\n                buildInfo['build.version'] = buildVersion\n                buildInfo['build.label'] = buildLabel\n                buildInfo['build.commit'] = currentCommit\n                self._saveBuildInfo(buildInfo)\n            \n        except Exception as err:\n            raise CIBuildError(\"Failed on method prebuild!\", err)\n\n    def createLabel(self, label, commit):\n        try:\n            cmdline = \"git tag \" + label + \" \" + commit\n            cmd = CMDExecutor(cmdline, self._workDir)\n            cmd.execute()\n            cmdline = \"git push \" + self._gitRemote + \" \" + label\n            cmd = CMDExecutor(cmdline, self._workDir)\n            cmd.execute()\n        except Exception as err:\n            raise CIBuildError(\"Failed on method createLabel\", err)\n        \n    def getNextBuildNumber(self):\n        iNextBN = 1\n        cmdline = \"git tag -l \" + self._prodVersion + \"* --sort=-version:refname\"\n        cmd = CMDExecutor(cmdline, self._workDir)\n        try:\n            output = cmd.execute()\n            if(output):\n                listOutput = output.split(\"\\n\")\n                sLatestBN = re.sub(self._prodVersion+'_b', '', listOutput[0], flags=re.IGNORECASE)\n                latestBN = int(sLatestBN)\n                iNextBN = latestBN + 1\n        except Exception as err:\n            raise CIBuildError(\"Failed on method getNextBuildNumber!\", err)\n\n        return iNextBN\n\n    def getCurrentCommit(self):\n        cmdline = \"git rev-parse HEAD\"\n        cmd = CMDExecutor(cmdline, self._workDir)\n        try:\n            output = cmd.execute()\n            return output.strip()\n        except Exception as err:\n            raise CIBuildError(\"Failed on method getCurrentCommit!\", err)\n    \nclass CIBuildError(CIBasicError):\n    def __init__(self, errormsg, cause=None):\n        CIBasicError.__init__(self, errormsg, cause)\n    \n    def __str__(self):\n        return self.stackError\n    \n    def __repr__(self):\n        return self.stackError\n    \nif __name__ == \"__main__\":
\n    build = CIBuild(\"/Users/mike/Documents/MikeWorkspace/FreessureCoffee/service\", \"1.3.1\", \"service\")  # buildName is required; \"service\" is a placeholder\n    build.getNextBuildNumber()","sub_path":"cikit/cibuild.py","file_name":"cibuild.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"604677921","text":"#! /usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n# @company: heetian\r\n# @file: set_nic_ip.py\r\n# @time: 2017/12/8 0008 1:27 PM\r\n# @author: xwh\r\n# @desc:\r\nimport os\r\n\r\n\r\ndef setNicIpAndPutInNetns(ip, nic_name, ns_name, mask):\r\n    try:\r\n        os.system(\"ip link set {nic_name} netns {ns_name}\".format(nic_name=nic_name, ns_name=ns_name))\r\n        os.system(\"ip netns exec {ns_name} ip addr add {ip}/{mask} dev {nic_name}\".format(\r\n            ns_name=ns_name, ip=ip, mask=mask, nic_name=nic_name))\r\n        os.system(\"ip netns exec {ns_name} ip link set {nic_name} up\".format(ns_name=ns_name, nic_name=nic_name))\r\n    except Exception as e:\r\n        raise e\r\n","sub_path":"net_ns/set_nic_ip.py","file_name":"set_nic_ip.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"105609404","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nclass DevelopConfig(object):\n    DEBUG = True\n    SECRET_KEY = 'debug_secretkey'\n\nclass ProductionConfig(object):\n    import os\n    SECRET_KEY = os.environ.get('FLASK_SECRET_KEY')\n\nsettings = {\n    'develop': DevelopConfig,\n    'production': ProductionConfig,\n}\n","sub_path":"caprice/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"382889149","text":"import sys, json, requests\n\ndef main(table_json):\n    endpoint = \"http://18.139.111.67:5000/initialise\"\n\n    headers = {\"content-type\": \"application/json\"}\n\n    # table_json = json.dumps(table_json)\n\n    data = json.dumps({\n        \"tables_json\": table_json\n    })\n\n    response = requests.post(endpoint, data=data, headers=headers)\n\n    if response.ok:\n        print('ok')\n    else:\n        print(response.text)\n\n\nif __name__ == \"__main__\":\n    table_json = sys.argv[1]\n    main(table_json)\n    ","sub_path":"tablevision_initialiser/initialise.py","file_name":"initialise.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"307201673","text":"import os\nimport shutil\nfrom django.core.management import call_command\nfrom django.core.management.base import BaseCommand,CommandError\nfrom django.urls import reverse\nfrom django.test.client import Client\nfrom django.conf import settings\n\n\n\ndef get_pages():\n    for name in os.listdir(settings.SITE_PAGES_DIRECTORY):  # walk the pages directory and collect the .html files\n        if name.endswith('.html'):  # endswith() returns True if the string ends with the given suffix,\n            # and False otherwise\n            yield name[:-5]\n\nclass Command(BaseCommand):\n    help='Build static site output'\n    leave_locale_alone = True\n\n    def add_arguments(self, parser):  # accept optional page names passed to the command\n        parser.add_argument('args',nargs='*')\n\n\n    def handle(self, *args, **options):\n        settings.DEBUG=False\n        settings.COMPRESS_ENABLED=True\n\n        if args:\n            pages=args\n            available=list(get_pages())\n            print(available)\n            invalid=[]\n            for page in pages:\n                if page not in available:\n                    invalid.append(page)\n            if invalid:\n                msg='Invalid pages: {}'.format(','.join(invalid))\n                # raise an error if any requested page does not exist\n                raise CommandError(msg)\n\n\n        if os.path.exists(settings.SITE_OUTPUT_DIRECTORY):  # if the output directory already exists,\n            # delete it and create a fresh output directory\n            shutil.rmtree(settings.SITE_OUTPUT_DIRECTORY)  # recursively delete the directory tree\n            os.mkdir(settings.SITE_OUTPUT_DIRECTORY)\n        os.makedirs(settings.STATIC_ROOT,exist_ok=True)  # create the directory tree recursively\n\n        call_command('collectstatic',interactive=False,clear=True,verbosity=0)\n        call_command('compress',force=True)\n\n        client=Client()\n        for page in get_pages():  # iterate over the .html pages collected from the pages directory\n            url=reverse('page',kwargs={'slug':page})\n            response=client.get(url)\n\n\n            if page == 'index':\n                output_dir=settings.SITE_OUTPUT_DIRECTORY\n            else:\n                output_dir=os.path.join(settings.SITE_OUTPUT_DIRECTORY,page)\n                os.makedirs(output_dir)\n            with open(os.path.join(output_dir,'index.html'),'wb') as f:\n                # render the template to static content, mirroring the Zhuque site pages, and write the rendered output into SITE_OUTPUT_DIRECTORY\n\n                f.write(response.content)\n","sub_path":"sitebuilder/management/commands/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"547693046","text":"import math\r\n# 1996 Pacejka R25B_13 Lateral Force\r\n# input normal load on tire, slip angle, camber angle, gives lateral force\r\n# on tire y axis\r\n\r\n#positive alpha = positive force\r\n#negative camber = \"good\" camber\r\n\r\n#Cyrus\r\n#I have no clue what's happening here, but I believe dynamics is currently working on something similar to this for us\r\n\r\ndef calc_lat_force(f_z, a, gamma):\r\n\r\n    #fz - normal force\r\n    #fz0 - nominal (reference) normal load\r\n    fz = f_z*4.448 #lb to N\r\n\r\n    fz0 = 663.94 #N\r\n    pcy1 = 1.377528\r\n    pdy1 = 2.4860\r\n    pdy2 = -.150167\r\n    pdy3 = -1.88895\r\n    pey1 = -.000043\r\n    pey2 = .000007\r\n    pey3 = -3683.9043\r\n    pey4 = -15729.78\r\n    pky1 = -114.08707\r\n    pky2 = -3.621914\r\n    pky3 = 2.51886\r\n    phy1 = .002182\r\n    phy2 = -.001398\r\n    phy3 = -.119546\r\n    pvy1 = .026184\r\n    pvy2 = -.029205\r\n    pvy3 = .034106\r\n    pvy4 = .424675\r\n\r\n    dfz = (fz-fz0)/fz0\r\n    svy = fz*(pvy1+pvy2*dfz+(pvy3+pvy4*dfz)*gamma)\r\n    shy = phy1+phy2*dfz+phy3*gamma\r\n    cy = pcy1\r\n    uy = (pdy1+pdy2*dfz)*(1-pdy3*gamma**2)\r\n    dy = uy*fz\r\n    kya = pky1*fz0*math.sin(2*math.atan(fz/(pky2*fz0)))*(1-pky3*abs(gamma))\r\n    by = kya/(cy*dy)\r\n    ay = a+shy\r\n\r\n    if ay < 0:\r\n        sign_ay = -1\r\n    elif ay == 0:\r\n        sign_ay = 0\r\n    else:\r\n        sign_ay = 1\r\n\r\n    ey = (pey1+pey2*dfz)*(1-(pey3+pey4*gamma))*sign_ay\r\n    fy0 = -(dy*math.sin(cy*math.atan(by*ay-ey*(by*ay-math.atan(by*ay))))+svy)\r\n\r\n    lat_force = fy0/4.448\r\n\r\n    return lat_force\r\n\r\n#print(calc_lat_force(.5,.5,.5))","sub_path":"lapsim/calc_lat_force.py","file_name":"calc_lat_force.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"510249705","text":"import os.path\n\nfrom pip._vendor.distlib.compat import raw_input\n\nsave_path = '/Users/vidyaprabhakarsristi/Desktop'\n\nname_of_file = raw_input(\"What is the name of the file: \")\n\ncompleteName = os.path.join(save_path, name_of_file + \".txt\")\n\nfile1 = open(completeName, \"w\")\n\ntoFile = raw_input(\"Write what you want into the field\")\n\nfile1.write(toFile)\n\nfile1.close()\n","sub_path":"LeetCode/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"547985080","text":"import os\nimport cv2\nimport numpy as np\nimport collections\nfrom scipy.spatial.distance import cdist\n\ndef loadImgs(filepath):\n    img_list=[]\n\n    class_names=os.listdir(filepath)\n    for class_name in class_names:\n        class_path = 
os.path.join(filepath,class_name)\n image_names = os.listdir(class_path)\n for image_name in image_names:\n try:\n img_path = os.path.join(class_path,image_name)\n img = cv2.imread(img_path)\n img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n img_list.append(img)\n except:\n pass\n\n return img_list\n\ndef get_all_features(img_list):\n \"\"\"\n :param imgs: a list of images\n :return: (#keypoints) ndarray, (#keypoints from all images,128) ndarray\n \"\"\"\n sift = cv2.xfeatures2d.SIFT_create()\n keypoints=None\n describes=None\n for img in img_list:\n kp,des=sift.detectAndCompute(img,None)\n if des is not None:\n describes=np.vstack((describes,des)) if describes is not None else des\n if kp is not None:\n keypoints=np.append(keypoints,kp) if keypoints is not None else kp\n return keypoints,describes\n\ndef img2histogram(img,centers):\n \"\"\"represent img by frequencies of visual words\n :param img: an (200x200) ndarray img\n :param centers: (k=200,128) center points\n :return: (200)\n \"\"\"\n cluster=len(centers)\n sift = cv2.xfeatures2d.SIFT_create()\n kp,des=sift.detectAndCompute(img,None)\n distances=cdist(des,centers,'euclidean')\n counter=collections.Counter(np.argmin(distances,axis=1))\n re=np.zeros(cluster)\n for i in counter:\n re[i]=counter[i]\n\n return re\n\ndef get_histograms(img_list,cluster,centers):\n histograms=np.zeros((len(img_list),cluster))\n for i,img in enumerate(img_list):\n histograms[i]=img2histogram(img,centers)\n\n return histograms\n\ndef knn(indices):\n \"\"\"\n :param indics: (#test images,k) ndarray\n \"\"\"\n indices = indices // 100\n k=indices.shape[1]\n acc=0\n for i in range(len(indices)):\n target_class=i//10\n predict_class=collections.Counter(indices[i]).most_common(1)[0][0]\n if target_class==predict_class:\n acc+=1\n\n return acc/len(indices)\n\ntrain_path=os.path.join('hw5_data','train')\ntest_path=os.path.join('hw5_data','test')\ncategory=list(os.listdir(train_path))\ncluster=100\nk=7\n\nif __name__=='__main__':\n # load images\n train_imgs=loadImgs(train_path)\n test_imgs=loadImgs(test_path)\n\n cats_dict = {c: dict.fromkeys(['kp', 'des']) for c in category}\n min_features=1000000000\n for i,c in enumerate(category):\n keypoints,describes=get_all_features(train_imgs[i*100:i*100+100])\n cats_dict[c]['kp']=keypoints\n cats_dict[c]['des']=describes\n if len(keypoints) 0:\r\n if self.sub_count == self.num_of_k_samples:\r\n break\r\n self.sub_count += 1\r\n with urllib.request.urlopen(data[1]) as url:\r\n data_json = json.loads(url.read().decode())\r\n for r, i in enumerate(data_json['data']):\r\n r = len(text) + 1\r\n topic.append([r, i['subreddit']])\r\n if 'media' in i.keys():\r\n if i['media'] is not None and 'oembed' in i['media'].keys():\r\n if 'description' in i['media']['oembed'].keys() & i['selftext']:\r\n text.append([r,\r\n i['title'] + ' ' + i['media']['oembed']['description'] + ' ' + i[\r\n 'selftext'], i['subreddit']])\r\n elif 'description' in i['media']['oembed']:\r\n text.append(\r\n [r, i['title'] + ' ' + i['media']['oembed']['description'], i['subreddit']])\r\n else:\r\n text.append([r, i['selftext'], i['subreddit']])\r\n elif 'selftext' in i.keys():\r\n text.append([r, i['title'] + ' ' + i['selftext'], i['subreddit']])\r\n print(len(data[0]))\r\n print(str(datetime.datetime.fromtimestamp(data[0][-1]['created_utc'])))\r\n self.after = data[0][-1]['created_utc']\r\n data = self.get_push_shift_data(s)\r\n self.sub_count = 0\r\n df = pd.DataFrame(text, columns=['Index', 'Text', 'Topic'])\r\n df = df[['Text', 'Topic']]\r\n return 
df","sub_path":"reddit_scrapper.py","file_name":"reddit_scrapper.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"599147717","text":"import sys\nimport os\n\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nmypath = os.path.dirname(__file__)\n_basicDataParentFrameUI, _basicDataParentFrame = \\\n uic.loadUiType(os.path.join(mypath, \"basicDataParentFrame_UI.ui\"))\n\nclass basicDataParentFrame(_basicDataParentFrame, _basicDataParentFrameUI):\n format = '%.5f' # numeric format for table entries in UQ Toolbox\n\n def __init__(self, parent=None):\n super(basicDataParentFrame, self).__init__(parent)\n self.parent = parent\n self.setupUi(self)\n self.dmfGroup.hide()\n self.solventFitFrame.init(parent=self)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n MainWindow = QMainWindow(parent=None)\n\n MainFrame = basicDataParentFrame()\n MainWindow.setCentralWidget(MainFrame)\n\n MainWindow.show()\n sys.exit(app.exec_())\n","sub_path":"foqus_lib/gui/basic_data/basicDataParentFrame.py","file_name":"basicDataParentFrame.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"632185371","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nfrom heapq import *\nfrom queue import PriorityQueue\n\n\nclass Solution:\n def mergeKLists2(self, lists):\n \"\"\"\n :type lists: List[ListNode]\n :rtype: ListNode\n \"\"\"\n if not lists: return None\n\n pq = []\n\n for head in lists:\n if head:\n heappush(pq, (head.val, head))\n\n dummy = ListNode(0)\n cur = dummy\n\n while pq:\n temp = heappop(pq)[1]\n\n cur.next = temp\n cur = cur.next\n\n if temp.next:\n heappush(pq, (temp.next.val, temp.next))\n\n return dummy.next\n\n def mergeKLists(self, lists):\n\n if not lists: return None\n pq = PriorityQueue()\n for head in lists:\n if head:\n pq.put((head.val, head))\n dummy = ListNode(0)\n cur = dummy\n while not pq.empty():\n temp = pq.get()[1]\n cur.next = temp\n cur = cur.next\n if temp.next:\n pq.put((temp.next.val, temp.next))\n\n return dummy.next","sub_path":"python/23. Merge k Sorted Lists-old.py","file_name":"23. Merge k Sorted Lists-old.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"6601616","text":"# Time: O(1)\n# Space: O(1)\n\n# 949\n# Given an array of 4 digits, return the largest 24 hour time that can be made.\n#\n# The smallest 24 hour time is 00:00, and the largest is 23:59. Starting from 00:00,\n# a time is larger if more time has elapsed since midnight.\n#\n# Return the answer as a string of length 5. 
If no valid time can be made, return an empty string.\n\nimport itertools\n\n\nclass Solution(object):\n def largestTimeFromDigits(self, A): # USE THIS\n \"\"\"\n :type A: List[int]\n :rtype: str\n \"\"\"\n A.sort(reverse=True) # optimization for early return\n for h1, h2, m1, m2 in itertools.permutations(A):\n hours = 10*h1 + h2\n mins = 10*m1 + m2\n if 0 <= hours < 24 and 0 <= mins < 60:\n return \"{:02}:{:02}\".format(hours, mins)\n return ''\n\n # backtracking to generate own permutation, compare every possible time string.\n def largestTimeFromDigits_backtrack(self, A):\n def backtrack(cur):\n if len(cur) == 4:\n perm.append(cur[:])\n else:\n for i in range(4):\n if not used[i]:\n used[i] = True\n cur.append(A[i])\n backtrack(cur)\n cur.pop()\n used[i] = False\n\n mx, ans = -1, None\n used = [False] * 4\n perm = []\n backtrack([])\n for B in perm:\n h = 10*B[0] + B[1]\n m = 10*B[2] + B[3]\n if 0 <= h < 24 and 0 <= m < 60 and 60*h+m > mx:\n mx, ans = 60*h+m, B\n return \"{}{}:{}{}\".format(ans[0],ans[1],ans[2],ans[3]) if mx != -1 else ''\n\n\n # VERY HARD to write code for picking up valid digits for each place. And the following\n # is still wrong which returns '' for [2,0,6,6] (tried to make '20:xx')\n def largestTimeFromDigits_wrong(self, A):\n h, m = '', ''\n if 2 not in A and 1 not in A and 0 not in A:\n return ''\n if 2 in A:\n h = '2'\n A.remove(2)\n if 3 not in A and 2 not in A and 1 not in A and 0 not in A:\n return ''\n if 3 in A:\n h = '23:'\n A.remove(3)\n elif 2 in A:\n h = '22:'\n A.remove(2)\n elif 1 in A:\n h = '21:'\n A.remove(1)\n elif 0 in A:\n h = '20:'\n A.remove(0)\n elif 1 in A:\n h = '1'\n A.remove(1)\n elif 0 in A:\n h = '0'\n A.remove(0)\n\n less6 = []\n for a in A:\n if a <= 5: less6.append(a)\n if len(less6) == 0:\n return ''\n elif len(less6) == 1:\n m = str(less6[0])\n A.remove(less6[0])\n if len(A) == 2:\n return h+str(max(A))+':'+m+str(min(A))\n else:\n return h+m+str(A[0])\n else:\n A.sort()\n if len(A) == 3:\n return h+str(A[2])+':'+str(A[1])+str(A[0])\n else:\n return h+str(A[1])+str(A[0])\n\nprint(Solution().largestTimeFromDigits([2,0,6,6])) # 06:26\nprint(Solution().largestTimeFromDigits([1,2,3,4])) # 23:41\nprint(Solution().largestTimeFromDigits([5,5,5,5])) # ''\nprint(Solution().largestTimeFromDigits([1,6,3,9])) # 19:36\nprint(Solution().largestTimeFromDigits([1,5,3,9])) # 19:53\nprint(Solution().largestTimeFromDigits([3,2,7,0])) # 23:07","sub_path":"Python/largest-time-for-given-digits.py","file_name":"largest-time-for-given-digits.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"273863072","text":"#!/usr/bin/env python3\n\"\"\"Using pytest-flask to test Flask app\"\"\"\n\nimport os\nimport subprocess\nimport pytest\nfrom playwright.sync_api import Page\n\n\ndef setup_module(module):\n \"\"\"Create the server fixture\"\"\"\n os.chdir(\"exercises/jokes\")\n module.server = subprocess.Popen([\"flask\", \"run\"])\n try:\n module.server.wait(timeout=1)\n except subprocess.TimeoutExpired:\n pass\n\n\ndef teardown_module(module):\n module.server.terminate()\n\n\ndef test_click_button(page: Page):\n page.goto(\"http://localhost:5000/\")\n page.click(\"#btnAmuse\")\n assert len(page.querySelectorAll(\"#jokes > p\")) == 1\n\n\n@pytest.mark.parametrize(\"language\", [\"de\", \"en\", \"es\"])\ndef test_select_language(page: Page, language):\n page.goto(\"http://localhost:5000/\")\n page.selectOption(\"#selLang\", language)\n page.click(\"#btnAmuse\")\n 
assert len(page.querySelectorAll(\"#jokes > p\")) == 1\n\n\n@pytest.mark.parametrize(\"category\", [\"all\", \"chuck\", \"neutral\"])\ndef test_select_category(page: Page, category):\n    page.goto(\"http://localhost:5000/\")\n    page.selectOption(\"#selCat\", category)\n    page.click(\"#btnAmuse\")\n    assert len(page.querySelectorAll(\"#jokes > p\")) == 1\n\n\ndef test_select_chuck_in_spanish(page: Page):\n    page.goto(\"http://localhost:5000/\")\n    page.selectOption(\"#selCat\", \"chuck\")\n    page.selectOption(\"#selLang\", \"es\")\n    page.click(\"#btnAmuse\")\n    assert len(page.querySelectorAll(\"#jokes > p\")) == 1\n\n\n@pytest.mark.skip\n@pytest.mark.parametrize(\"number\", [1, 5, 10])\ndef test_select_number(page: Page, number):\n    page.goto(\"http://localhost:5000/\")\n    page.selectOption(\"#selNum\", str(number))\n    page.click(\"#btnAmuse\")\n    assert len(page.querySelectorAll(\"#jokes > p\")) == number\n\n\nif __name__ == \"__main__\":\n    pytest.main([\"-v\", \"test_jokes_front.py\"])\n","sub_path":"tests/jokes/test_jokes_front.py","file_name":"test_jokes_front.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"644031775","text":"# -*- coding:utf-8 -*-\n\nfrom openerp import SUPERUSER_ID\nfrom openerp.http import request\nfrom openerp import http\nfrom werkzeug import utils\n\n\nclass TelegramLogin(http.Controller):\n\n    @http.route('/web/login/telegram', type='http', auth='user')\n    def do_login(self, *args, **kw):\n        token = kw['token']\n        command_ids = request.env['telegram.command'].search([('name', '=', '/login')]).ids\n\n        tsession = request.env['telegram.session'].sudo().search([('token', '=', token)])\n        if not tsession:\n            return utils.redirect('/web')\n\n        tsession.user_id = request.env.uid\n\n        message = {'action': 'send_notifications',\n                   'command_ids': command_ids,\n                   'tsession_id': tsession.id}\n        request.env['telegram.bus'].sendone('telegram_channel', message)\n        return utils.redirect('/web')\n","sub_path":"telegram/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"445462715","text":"import os\nimport cv2\nimport copy\nimport math\nimport time\nimport torch\nimport joblib\nimport random\nimport argparse\nimport numpy as np\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nfrom tqdm import tqdm\nfrom vae_conv import VAE\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms\nfrom torch.autograd import Variable\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import Dataset\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nmean = [0.485, 0.456, 0.406]\nstd = [0.229, 0.224, 0.225]\n\n\ndef imgProc(img_path, is_target):\n\tori_img = cv2.imread(img_path)\n\tori_img = cv2.resize(ori_img, (224, 224))\n\timg = ori_img.copy().astype(np.float32)\n\n\timg /= 255.0\n\tif not is_target:\n\t\timg = (img - mean)/std\n\timg = img.transpose(2, 0, 1)\n\timg_ts = Variable(torch.from_numpy(img).type(torch.float)).to(device)\n\n\treturn img_ts\n\n\nclass ImageDataset(Dataset):\n\t'''\n\tclean_dir:\n\t\t(str) path of the root folder of all clean images\n\tadv_dir:\n\t\t(str) path to the root folder of all attacked images\n\tattack_type:\n\t\t(str) type of attack name\n\tsetName:\n\t\t(str) name of the dataset split to load, e.g. 'train' or 'val'\n\t'''\n\tdef __init__(self, clean_dir, adv_dir, attack_type, setName):\n\t\tself.ori_fileList = []\n\t\tself.adv_fileList = []\n\t\tori_folder = os.path.join(clean_dir, setName)\n\t\tadv_folder = os.path.join(adv_dir, attack_type, setName)\n\t\tfor path, subdirs, files in os.walk(ori_folder):\n\t\t\tfor f in files:\n\t\t\t\tself.ori_fileList.append(os.path.join(path, f))\n\t\t\t\tadv_fname = 'adv_' + f\n\t\t\t\tself.adv_fileList.append(os.path.join(adv_folder, adv_fname))\n\t\tassert len(self.ori_fileList)==len(self.adv_fileList)\n\n\tdef __len__(self):\n\t\treturn len(self.ori_fileList)\n\n\tdef __getitem__(self, index):\n\t\tx = imgProc(self.adv_fileList[index], is_target=True)\n\t\ty = imgProc(self.ori_fileList[index], is_target=True)\n\t\t# Assertions\n\t\t_adv = '_'.join(self.adv_fileList[index].split('/')[-1].split('_')[1:])\n\t\t_ori = self.ori_fileList[index].split('/')[-1]\n\t\tassert (_adv == _ori)\n\t\tassert x.shape == y.shape\n\t\tassert (x - y).abs().sum() > 0\n\t\treturn x, y\n\n\ndef visualResults(adv_img, rec_img, tar_img, epoch):\n\n\tassert adv_img.shape == rec_img.shape\n\tassert rec_img.shape == tar_img.shape\n\tadv = adv_img.data.cpu().numpy()\n\t#adv = adv.mul(torch.FloatTensor(std).view(3, 1, 1)).add(torch.FloatTensor(mean).view(3,1,1)).detach().numpy()\n\tadv = np.transpose(adv, (1,2,0)) # C X H X W ==> H X W X C\n\tadv = np.clip(adv, 0, 1)\n\n\trec = rec_img.data.cpu().numpy()\n\trec = np.transpose(rec, (1,2,0))\n\trec = np.clip(rec, 0, 1)\n\n\ttar = tar_img.data.cpu().numpy()\n\ttar = np.transpose(tar, (1,2,0))\n\ttar = np.clip(tar, 0, 1)\n\n\tfigure, ax = plt.subplots(1,3)\n\tax[0].imshow(adv)\n\tax[0].set_title('input adv_img')\n\tax[0].axis('off')\n\tax[1].imshow(rec)\n\tax[1].set_title('output rec_img')\n\tax[1].axis('off')\n\tax[2].imshow(tar)\n\tax[2].set_title('target clean_img')\n\tax[2].axis('off')\n\n\toutPath = './trained_records/figures/'\n\toutName = 'vae_val_e{}.png'.format(epoch)\n\tif not os.path.isdir(outPath):\n\t\tos.mkdir(outPath)\n\tplt.savefig(os.path.join(outPath, outName))\n\tprint ('save fig to: {}'.format(os.path.join(outPath, outName)))\n\n\ndef train(clean_dir, adv_dir, attack_type):\n\t'''\n\tclean_dir:\n\t\t(str) path of the root folder of all clean images\n\tadv_dir:\n\t\t(str) path to the root folder of all attacked images\n\tattack_type:\n\t\t(str) type of attack name\n\t'''\n\t# Ignore all warnings\n\timport warnings\n\twarnings.filterwarnings(\"ignore\")\n\n\t# Set up model hyper-params\n\tz_size = 2048\n\thidden_dim = 64\n\tdrop_p = 0.5\n\timage_size = 224\n\tchannel_num = 3\n\tis_res = True\n\n\t# Set up training hyper-params\n\tlr = 1e-3\n\tweight_decay = 1e-5\n\tbatch_size = 64\n\tnum_epochs = 50\n\tbeta = 1\n\tvisual_interval = 2\n\tbest_loss = math.inf\n\tloss_record = {'train': {'total_loss': [], 'rec_loss':[], 'kl_loss':[]},\n \t\t\t\t   'val': {'total_loss': [], 'rec_loss':[], 'kl_loss':[]}}\n\n\tdataset = {x: ImageDataset(clean_dir, adv_dir, attack_type, x) for x in ['train', 'val']}\n\tdataset_sizes = {x: len(dataset[x]) for x in ['train', 'val']}\n\tprint('Dataset size: train {}, val {}'.format(dataset_sizes['train'], dataset_sizes['val']))\n\n\tdataloaders = {'train': DataLoader(dataset['train'], batch_size=batch_size, shuffle=True, num_workers=0),\n                   'val' : DataLoader(dataset['val'], batch_size=batch_size, shuffle=False, num_workers=0)}\n\n    # Initialize VAE model, optimizer and scheduler\n\tmodel = VAE(image_size, channel_num, hidden_dim, z_size, is_res, drop_p).to(device)\n\toptimizer = 
optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999), weight_decay=weight_decay)\n\tscheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=5, threshold=1e-7)\n\n # Training\n\tprint ('Start training on {}...'.format(device))\n\tsince = time.time()\n\n\tcounter = 0\n\tfor epoch in range(num_epochs):\n\t\tprint('\\nEpoch {}/{}, lr: {}, wd: {}'.format(epoch + 1, num_epochs,\n\t\t\t optimizer.param_groups[0]['lr'], weight_decay))\n\t\tprint('-' * 30)\n\n\t\t# early stop counter\n\t\tif optimizer.param_groups[0]['lr'] < 1e-6:\n\t\t\tcounter += 1\n\t\tif counter >= 5:\n\t\t\tbreak\n\n\t\tfor phase in ['train', 'val']:\n\t\t\tif phase == 'train':\n\t\t\t\tmodel.train()\n\t\t\telse:\n\t\t\t\tmodel.eval()\n\n # Initial running loss\n\t\t\trunning_total_loss = 0.0\n\t\t\trunning_rec_loss = 0.0\n\t\t\trunning_kl_loss = 0.0\n\n\t\t\tfor inputs, targets in tqdm(dataloaders[phase], desc='{} iterations'.format(phase), leave=False):\n\t\t\t\tinputs = inputs.to(device)\n\t\t\t\ttargets = targets.to(device)\n \t# forward-prop\n\t\t\t\twith torch.set_grad_enabled(phase == 'train'):\n\t\t\t\t\t(mean, logvar), reconstructed = model(inputs)\n\t\t\t\t\trec_loss = model.reconstruction_loss(reconstructed, targets)\n\t\t\t\t\tkl_loss = model.kl_divergence_loss(mean, logvar)\n\t\t\t\t\ttotal_loss = rec_loss + beta * kl_loss\n\n # backward + optimize only if in training phase\n\t\t\t\t\tif phase == 'train':\n\t\t\t\t\t\t# zero the parameter gradients\n\t\t\t\t\t\toptimizer.zero_grad()\n\t\t\t\t\t\t# backward-prop\n\t\t\t\t\t\ttotal_loss.backward()\n\t\t\t\t\t\toptimizer.step()\n\n\t\t\t\t# compute loss for running loss\n\t\t\t\trunning_kl_loss += kl_loss.item() * inputs.size(0)\n\t\t\t\trunning_rec_loss += rec_loss.item() * inputs.size(0)\n\t\t\t\trunning_total_loss += total_loss.item() * inputs.size(0)\n\n\t\t\t# Compute epoch loss\n\t\t\tepoch_kl_loss = running_kl_loss / dataset_sizes[phase]\n\t\t\tepoch_rec_loss = running_rec_loss / dataset_sizes[phase]\n\t\t\tepoch_total_loss = running_total_loss / dataset_sizes[phase]\n\n\t\t\t# Update loss records\n\t\t\tloss_record[phase]['total_loss'].append(epoch_total_loss)\n\t\t\tloss_record[phase]['rec_loss'].append(epoch_rec_loss)\n\t\t\tloss_record[phase]['kl_loss'].append(epoch_kl_loss)\n\n\t\t\t# Output training/val results\n\t\t\tprint('{} Loss: total: {:.4f}, rec_loss: {:.4f}, kl_loss: {:.4f}'\n\t\t\t\t.format(phase, epoch_total_loss, epoch_rec_loss, epoch_kl_loss))\n\n\t\t\t# Save images\n\t\t\tif (epoch+1) % visual_interval == 0 and epoch > 0 and phase == 'val':\n\t\t\t\trndIdx = random.randint(0, inputs.size(0)-1)\n\t\t\t\tprint ('Save reconstructed images, random index={} in the last batch'.format(rndIdx))\n\t\t\t\tvisualResults(inputs[rndIdx], reconstructed[rndIdx], targets[rndIdx], epoch+1)\n\n\t\t\t# Step optimizer scheduler\n\t\t\tif phase == 'val':\n\t\t\t\tscheduler.step(epoch_total_loss)\n\n\t\t\t# Copy best model\n\t\t\tif phase == 'val' and epoch_total_loss < best_loss:\n\t\t\t\tbest_loss = epoch_total_loss\n\t\t\t\tbest_model_wts = copy.deepcopy(model.state_dict())\n\n\t# End of training, return the best model\n\ttime_elapsed = time.time() - since\n\tprint('\\nTraining complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n\tprint('Best val loss: {}'.format(best_loss))\n\n\t# Save the best weights and loss_records\n\tsave_path = './trained_weights/'\n\tif not os.path.isdir(save_path):\n\t\tos.mkdir(save_path)\n\tweight_fname = 'vae_{}_zdim{}_hdim{}_e{}_lr{}.torch'.format(attack_type, z_size, hidden_dim, 
num_epochs, str(lr).split('.')[-1])\n\ts_path = os.path.join(save_path, weight_fname)\n\ttorch.save(best_model_wts, s_path)\n\tprint ('Best weights saved to:', s_path)\n\n\tsave_path = './trained_records/'\n\tif not os.path.isdir(save_path):\n\t\tos.mkdir(save_path)\n\trecord_fname = 'vae_{}_zdim{}_hdim{}_e{}_lr{}.pkl'.format(attack_type, z_size, hidden_dim, num_epochs, str(lr).split('.')[-1])\n\ts_path = os.path.join(save_path, record_fname)\n\tjoblib.dump(loss_record, s_path)  # save the loss curves here, not the model weights\n\tprint ('Training records saved to:', s_path)\n\n\ndef main(args):\n\tclean_dir = args.clean_dir\n\tadv_dir = args.adv_dir\n\tattack_type = args.attack_type\n\ttrain(clean_dir, adv_dir, attack_type)\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--clean_dir', type=str, required=True, help='root folder of the clean images')\n\tparser.add_argument('--adv_dir', type=str, required=True, help='root folder of the adversarial images')\n\tparser.add_argument('--attack_type', type=str, required=True, help='type of attacks, e.g. fgsm, b_iter')\n\targs = parser.parse_args()\n\tmain(args)","sub_path":"attack_defense/VAE/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"151323152","text":"import spn\n#from spn.structure.leaves.mvgauss.MVG import *\nfrom spn.io.Text import *\nimport sys\nimport numpy as np\nfrom spn.structure.leaves.parametric.Parametric import *\nfrom spn.structure.leaves.parametric.MLE import *\nfrom spn.algorithms.MPE import mpe\nfrom spn.structure.prometheus.disc import *\nfrom scipy.stats import multivariate_normal as mn\n#from spn.structure.prometheus.disc import *\n\nnode = MultivariateGaussian(np.inf, np.inf)\ndata = np.array([1, 2, 3, 4, 5, 1, 2, 3, 4, 1, 3, 3, 6, 2]).reshape(-1, 2)\nupdate_parametric_parameters_mle(node, data)\n\nprint(node.mean, node.sigma)\n\nprint(node.scope)\n\ndummydata = np.asarray([[1, 2, 4, 8], [2.1, 4.1, 8.1, 16.1], [\n    4.1, 8.1, 16.1, 32.1], [8.8, 16.5, 32.3, 64.2]])\ndummyscope = list([0, 1, 2, 3])\n\nspn = MultivariateGaussian(np.inf, np.inf)\n\nupdate_parametric_parameters_mle(spn, dummydata)\n\nprint(spn.mean)\nprint(spn.sigma)\n\nspn.scope = dummyscope\n\n#print(mn.pdf(spn.mean, spn.mean, spn.cov))\n\nprint(spn.scope)\n\ndummydata = np.asarray([[np.nan, 2.0, np.nan, np.nan],\n                        [np.nan, np.nan, np.nan, 64.3]])\n\nprint(np.shape(dummydata))\nprint(np.shape(np.asarray(spn.mean)))\nprint(np.shape(np.asarray(spn.sigma)))\n\nprint(mpe(spn, dummydata))\n\nprint(spn_to_str_equation(spn))\n\nrecreate = (str_to_spn(spn_to_str_equation(spn)))\n\nprint(spn_to_str_equation(recreate))\n\nprint(recreate.mean)\nprint(recreate.sigma)\n\narr = np.load('./test.npy')\nteststruct = prometheus(arr, 1, itermult=0, leafsize=4, maxsize=6)\n\ntestspn = str_to_spn(teststruct)\n\nrecreate = spn_to_str_equation(testspn)\n\nfile = open('./ca.txt', 'w')\nfile.write(teststruct)\nfile.close()\n","sub_path":"src/spn/tests/prometheus_tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"343081061","text":"# Send a single SMS message\n\nfrom schema import Optional, Schema\n\nfrom src.apps.models.sms import SmsLog\nfrom src.comm.logger import logger\nfrom src.comm.model_resource import SQLModelSchemaResource\nfrom src.config.msgconfig import Msg\n\nfrom . import XwSmsSingleSender\n\n\nclass SmsSentSingleAPI(SQLModelSchemaResource):\n    \"\"\"Send a single SMS message.\"\"\"\n\n    model = SmsLog\n    allow_methods = [\"post\"]\n    validate_schemas = {\n        \"post\": Schema(\n            {\n                \"receiver\": str,\n                \"params\": list,\n                \"template_type_code\": int,\n                \"merchant_code\": str,\n                Optional(\"production_code\"): str,\n            }\n        )\n    }\n\n    def post(self):\n\n        receiver = self.validate_data.get(\"receiver\")\n        params = self.validate_data.get(\"params\")\n        template_type_code = self.validate_data.get(\"template_type_code\")\n        merchant_code = self.validate_data.get(\"merchant_code\")\n        production_code = self.validate_data.get(\"production_code\", \"\")\n        sms_single_sender = XwSmsSingleSender(\n            merchant_code, production_code, template_type_code\n        )\n        is_success, msg = sms_single_sender.send_single_sms(receiver, params)\n        if not isinstance(msg, tuple):\n            return msg\n        if is_success:\n            return {\"serial_number\": msg[1]}\n        logger.info(msg[0])\n        return Msg.SMS_SENDER_FAILED\n","sub_path":"xxw/chaos/src/apps/handlers/sms/sent_single.py","file_name":"sent_single.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"137893047","text":"# Crash Course Text Project 1 - 11th Iteration:\n\n''' Building the alien fleet:\n\nStart by figuring out how much horizontal space we have:\n\n    The screen width is stored in ai_settings.screen_width, but we need empty\n    margins on each side.\n    \n    We make this margin the width of one alien.\n    \n    The available space for aliens is the screen width - 2 alien widths:\n    \n    available_space_x = ai_settings.screen_width - (2 * alien_width)\n    \n    We also have to set the spacing between aliens. The space to display one\n    alien is twice its width. One to display the alien and one to create an\n    empty space to the right of it.\n    \n    Find the number of aliens: divide the available space by two times the \n    width of an alien:\n    \n    number_aliens_x = available_space_x / (2 * alien_width)\n    \n    We start to create a row by creating an empty group in the main class: '''\n\nimport pygame\nfrom pygame.sprite import Group\nfrom settings import Settings\nfrom ship import Ship\n# from alien import Alien\n\n''' We no longer need to import the Alien class here because we are not creating\n    aliens directly inside the alien_invasion module.
'''\n\nimport game_functions as gf\n\ndef run_game():\n\n pygame.init()\n ai_settings = Settings()\n screen = pygame.display.set_mode(\n (ai_settings.screen_width, ai_settings.screen_height))\n pygame.display.set_caption(\"Alien Invasion\")\n ship = Ship(ai_settings, screen)\n bullets = Group()\n \n ''' Create a new group to store a group of aliens: '''\n aliens = Group()\n \n ''' Create a fleet of aliens '''\n gf.create_fleet(ai_settings, screen, aliens)\n\n while True:\n\n gf.check_events(ai_settings, screen, ship, bullets)\n ship.update()\n \n gf.update_bullets(bullets)\n \n ''' Add alien fleet to screen update ''' \n gf.update_screen(ai_settings, screen, ship, aliens, bullets)\n\nrun_game()\n","sub_path":"Python_Crash_Course_text/Crash_Course_Text_Projects/11_ver_Project_01/alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"621794419","text":"import io\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sn\nimport torch\nfrom detectron2.data import MetadataCatalog, build_detection_test_loader, DatasetMapper, DatasetCatalog\nfrom detectron2.data import transforms as T\nfrom detectron2.engine import HookBase\n\nfrom src.visualization import create_confusion_matrix\n\n\nclass ConfusionHook(HookBase):\n def __init__(\n self,\n data_loader,\n n,\n threshold: float = 0.75,\n ) -> None:\n self.data_loader = data_loader\n self.n = n\n self.threshold = threshold\n\n assert 0 < self.threshold < 1\n\n def after_step(self):\n next_iter = self.trainer.iter + 1\n is_final = next_iter == self.trainer.max_iter\n eval_period = self.trainer.cfg.TEST.EVAL_PERIOD * 2\n is_eval_period = eval_period > 0 and next_iter % eval_period == 0\n\n if is_eval_period or is_final:\n print(\"@@@@\", is_eval_period, eval_period, next_iter, \"@@@@\")\n self._preform()\n\n def _preform(self):\n self.trainer.model.eval()\n meta_data = MetadataCatalog.get(self.trainer.cfg.DATASETS.TEST[0])\n loader = iter(self.data_loader)\n\n ground_truths = []\n predictions = []\n\n with torch.no_grad():\n for i in range(self.n):\n inputs = next(loader)\n ground_truths.append(inputs[0])\n\n outputs = self.trainer.model(inputs)[0][\"instances\"].to(\"cpu\")\n outputs = outputs[outputs.scores > self.threshold]\n predictions.append(outputs)\n\n m, labels = create_confusion_matrix(ground_truths, predictions, meta_data)\n df = pd.DataFrame(\n m,\n index=list(map(lambda x: \"Pred \" + x, labels)),\n columns=list(map(lambda x: \"GT \" + x, labels))\n )\n\n title = f\"Confusion matrix {self.trainer.cfg.DATASETS.TEST[0]}\"\n plt.figure(figsize=(7, 7))\n sn.heatmap(df, annot=True)\n\n plt.title(title)\n buf = io.BytesIO()\n plt.savefig(buf, format='jpg')\n buf.seek(0)\n image = cv2.imdecode(np.frombuffer(buf.getvalue(), np.uint8), -1)\n image = image.transpose(2, 0, 1)\n\n self.trainer.storage.put_image(title, image)\n self.trainer.model.train()\n plt.close()\n\n @classmethod\n def create(cls, cfg, *, threshold=0.75):\n mapper = DatasetMapper(\n is_train=True,\n augmentations=[\n T.ResizeShortestEdge(\n short_edge_length=cfg.INPUT.MIN_SIZE_TEST,\n max_size=cfg.INPUT.MAX_SIZE_TEST,\n sample_style=cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING)\n ],\n image_format=cfg.INPUT.FORMAT,\n use_instance_mask=cfg.MODEL.MASK_ON,\n instance_mask_format=cfg.INPUT.MASK_FORMAT,\n use_keypoint=cfg.MODEL.KEYPOINT_ON,\n recompute_boxes=True,\n )\n\n data_loader = 
build_detection_test_loader(\n cfg,\n cfg.DATASETS.TEST[0],\n mapper=mapper,\n )\n n = len(DatasetCatalog.get(cfg.DATASETS.TEST[0]))\n return ConfusionHook(data_loader, n=n, threshold=threshold)\n","sub_path":"src/hooks/ConfusionHook.py","file_name":"ConfusionHook.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"131343377","text":"import json\r\nimport logging\r\nimport os\r\nimport sys\r\n\r\nfrom flask import Flask, request, Response\r\nimport requests\r\nimport yaml\r\n\r\n\r\nPAYLOAD_TITLE = \"[{repository[name]}:{branch}] Build #{number} {result_text}\"\r\nPAYLOAD_DESCRIPTION = \"[`{commit:.7}`]({url}) {message}\"\r\nPAYLOAD_COMMIT_URL = \"https://github.com/{repository[owner_name]}/{repository[name]}/commit/{commit}\"\r\n\r\n\r\nwith open(\"config.yaml\") as file:\r\n config = yaml.load(file)\r\n\r\n# Fetch the env variables from Heroku os.environ for security reasons...\r\nDISCORD_WEBHOOK = os.environ[\"DISCORD_WEBHOOK\"]\r\nDISCORD_WEBHOOK_KARMANOR = os.environ[\"DISCORD_WEBHOOK_KARMANOR\"]\r\n\r\nDISCORD_JSON = config[\"discord-json\"]\r\nCOLORS = config[\"colors\"]\r\nCOLORS_AZURE = config[\"colors-azure\"]\r\n\r\nSW_JSON = config[\"sw-json\"]\r\nSW_JSON_ID = config[\"sw-json-file-id\"]\r\n\r\napp = Flask(__name__)\r\n# Is this even needed?\r\napp.config[\"SECRET_KEY\"] = os.environ.get(\"SECRET_KEY\", \"idk\")\r\n\r\n\r\n@app.route(\"/badge\", methods=[\"GET\"])\r\ndef badge():\r\n # Gets online users\r\n data = requests.get(DISCORD_JSON)\r\n data = json.loads(data.text)\r\n onlineUsersString = str(len(data['members'])) + \" Online\"\r\n\r\n # Gets total downloaded (from SW and GitHub)\r\n # Gets lifetime subscriptions from Steam Workshop\r\n data = requests.post(\r\n SW_JSON, {\"itemcount\": 1, \"publishedfileids[0]\": SW_JSON_ID})\r\n data = json.loads(data.text)\r\n totalDownloadsSteam = int(\r\n data[\"response\"][\"publishedfiledetails\"][0][\"lifetime_subscriptions\"])\r\n\r\n # Gets all downloads from GitHub\r\n data = requests.get(\r\n 'https://api.github.com/repos/ArmaAchilles/Achilles/releases')\r\n data = json.loads(data.text)\r\n totalDownloadsGitHub = 0\r\n\r\n # https://github.com/mmilidoni/github-downloads-count/blob/af4ea8ad1148450a4135c1404a58f6719ceb8960/gdc#L63\r\n for releases in data:\r\n if \"assets\" in releases:\r\n for asset in releases['assets']:\r\n totalDownloadsGitHub += asset['download_count']\r\n\r\n # get current version\r\n tagName = data[0]['tag_name']\r\n\r\n # Returns a JSON\r\n return Response(json.dumps(\r\n {\r\n 'users': onlineUsersString,\r\n 'downloads': human_format(totalDownloadsSteam + totalDownloadsGitHub),\r\n 'version': tagName.replace(\"v\", \"\")\r\n }\r\n ), mimetype='application/json')\r\n\r\n# https://stackoverflow.com/a/579376\r\n\r\n\r\ndef human_format(num):\r\n magnitude = 0\r\n while abs(num) >= 1000:\r\n magnitude += 1\r\n num /= 1000.0\r\n # add more suffixes if you need them\r\n return '{}{}'.format(round(num), ['', 'k', 'm'][magnitude])\r\n\r\n\r\n@app.route(\"/webhook\", methods=[\"POST\"])\r\ndef webhook():\r\n data = request.form[\"payload\"]\r\n data = json.loads(data)\r\n\r\n if (data[\"repository\"][\"owner_name\"] != \"ArmaAchilles\"):\r\n sys.exit()\r\n\r\n # Force lower because yaml uses lower case\r\n result = data[\"status_message\"].lower()\r\n\r\n color = COLORS[result]\r\n\r\n time = \"started_at\" if result == \"pending\" else \"finished_at\"\r\n\r\n # PHP example just uses array() but that doesn't make 
sense...\r\n # Idk, should ask someone who PHPs\r\n payload = {\r\n \"username\": \"Travis CI\",\r\n \"avatar_url\": \"https://i.imgur.com/kOfUGNS.png\",\r\n \"embeds\": [{\r\n \"color\": color,\r\n \"author\": {\r\n \"name\": data[\"author_name\"]\r\n # TODO: See if author username can be found in\r\n # Travis' payload, and then\r\n # `\"icon_url\" : \"https://github.com/USERNAME.png`\r\n # as described in https://stackoverflow.com/a/36380674\r\n },\r\n \"title\": PAYLOAD_TITLE.format(**data, result_text=result.capitalize()),\r\n \"url\": data[\"build_url\"],\r\n \"description\": PAYLOAD_DESCRIPTION.format(**data, url=PAYLOAD_COMMIT_URL.format(**data)),\r\n \"timestamp\": data[time]\r\n }]\r\n }\r\n\r\n resp = requests.request(\"POST\", DISCORD_WEBHOOK, json=payload, headers={\r\n \"Content-Type\": \"application/json\"})\r\n\r\n # https://stackoverflow.com/a/19569090\r\n return resp.text, resp.status_code, resp.headers.items()\r\n\r\n\r\n@app.route(\"/webhook_karmanor\", methods=[\"POST\"])\r\ndef webhook_karmanor():\r\n data = request.form[\"payload\"]\r\n data = json.loads(data)\r\n\r\n if (data[\"repository\"][\"owner_name\"] != \"ArmaAchilles\"):\r\n sys.exit()\r\n\r\n # Force lower because yaml uses lower case\r\n result = data[\"status_message\"].lower()\r\n\r\n color = COLORS[result]\r\n\r\n time = \"started_at\" if result == \"pending\" else \"finished_at\"\r\n\r\n # PHP example just uses array() but that doesn't make sense...\r\n # Idk, should ask someone who PHPs\r\n payload = {\r\n \"username\": \"Travis CI\",\r\n \"avatar_url\": \"https://i.imgur.com/kOfUGNS.png\",\r\n \"embeds\": [{\r\n \"color\": color,\r\n \"author\": {\r\n \"name\": data[\"author_name\"]\r\n # TODO: See if author username can be found in\r\n # Travis' payload, and then\r\n # `\"icon_url\" : \"https://github.com/USERNAME.png`\r\n # as described in https://stackoverflow.com/a/36380674\r\n },\r\n \"title\": PAYLOAD_TITLE.format(**data, result_text=result.capitalize()),\r\n \"url\": data[\"build_url\"],\r\n \"description\": PAYLOAD_DESCRIPTION.format(**data, url=PAYLOAD_COMMIT_URL.format(**data)),\r\n \"timestamp\": data[time]\r\n }]\r\n }\r\n\r\n resp = requests.request(\"POST\", DISCORD_WEBHOOK_KARMANOR, json=payload, headers={\r\n \"Content-Type\": \"application/json\"})\r\n\r\n # https://stackoverflow.com/a/19569090\r\n return resp.text, resp.status_code, resp.headers.items()\r\n\r\n@app.route(\"/azure\", methods=[\"POST\"])\r\ndef azure():\r\n data = request.get_json()\r\n\r\n resourceJson = requests.get(data[\"resource\"][\"url\"]).json()\r\n\r\n if (resourceJson[\"repository\"][\"id\"] != \"ArmaAchilles/Achilles\"):\r\n sys.exit()\r\n\r\n result = resourceJson[\"result\"].lower()\r\n\r\n color = COLORS_AZURE[result]\r\n\r\n commit = requests.get(\"https://api.github.com/repos/ArmaAchilles/Achilles/commits/\" + resourceJson[\"sourceVersion\"]).json()\r\n\r\n branch = resourceJson[\"sourceBranch\"].split('/')[2]\r\n\r\n payload = {\r\n \"username\": \"Azure Pipelines\",\r\n \"avatar_url\": \"https://i.imgur.com/2PJdoTK.png\",\r\n \"embeds\": [{\r\n \"color\": color,\r\n \"author\": {\r\n \"name\": commit[\"author\"][\"login\"],\r\n \"url\": commit[\"author\"][\"html_url\"],\r\n \"icon_url\": commit[\"author\"][\"avatar_url\"]\r\n },\r\n \"title\": \"[{repository}:{branch}] Build #{number} {result}\".format(\r\n repository=\"Achilles\", branch=branch, number=resourceJson[\"id\"], result=result.capitalize()\r\n ),\r\n \"url\": resourceJson[\"_links\"][\"web\"][\"href\"],\r\n \"description\": 
\"[`{commit:.7}`]({url}) {message}\".format(\r\n commit=commit[\"sha\"], url=commit[\"html_url\"], message=commit[\"commit\"][\"message\"]\r\n ),\r\n \"timestamp\": resourceJson[\"finishTime\"]\r\n }]\r\n }\r\n\r\n resp = requests.request(\"POST\", DISCORD_WEBHOOK, json=payload, headers={\r\n \"Content-Type\": \"application/json\"})\r\n\r\n return resp.text, resp.status_code, resp.headers.items()\r\n\r\n\r\n@app.errorhandler(500)\r\ndef server_error(e):\r\n logging.exception(\"Error :/\")\r\n return \"\"\"\r\n Idk, server error :/\r\n\r\n
{}</pre>\r\n\r\n    sorry\r\n    \"\"\".format(e), 500\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(debug=True)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"210887979","text":"\n### one code one day\n### 2020/03/13\n### leetcode 646 Maximum Length of Pair Chain\n### dynamic programming: sort, then longest-chain subsequence\n### most efficient approach: sort by pairs[1], then greedy\n\nfrom typing import List\n\ndef findLongestChain(self, pairs: List[List[int]]) -> int:\n    if(len(pairs) < 2):\n        return len(pairs)\n    else:\n        pairs.sort()\n        dp = [1] * len(pairs)\n        for i in range(1,len(pairs)):\n            for j in range(i):\n                if(pairs[j][1] < 
pairs[i][0]):\n dp[i] = max(dp[i], dp[j] + 1)\n return max(dp)","sub_path":"动态规划/findLongestChain.py","file_name":"findLongestChain.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"413934376","text":"import numpy as np\n\n\ndef train_error(dataset):\n '''\n The training error of the dataset\n '''\n p = calc_p(dataset)\n return min(p, 1 - p)\n\n\ndef entropy(dataset):\n '''\n The entropy formula https://en.wikipedia.org/wiki/ID3_algorithm#Entropy\n C(p) = -p*log(p) - (1-p)log(1-p)\n '''\n p = calc_p(dataset)\n # Can't take log of 0\n if p == 0 or p == 1:\n return 0\n return (-1 * p)*(np.log2(p)) - (1 - p)*(np.log2(1 - p))\n\n\ndef gini_index(dataset):\n '''\n Gini index formula\n C(p) = 2*p*(1-p)\n '''\n p = calc_p(dataset)\n return 2*p*(1-p)\n\n\ndef calc_p(dataset):\n num_y_1 = 0\n for data in dataset:\n if data[0] == 1:\n num_y_1 += 1\n if len(dataset) == 0:\n return 0\n\n p = num_y_1/len(dataset)\n\n return p\n\n\nclass Node:\n def __init__(self, left=None, right=None, depth=0, index_split_on=0, isleaf=False, label=1, info=None):\n self.left = left\n self.right = right\n self.depth = depth\n self.index_split_on = index_split_on\n self.isleaf = isleaf\n self.label = label\n self.info = {} if info is None else info\n\n\nclass DecisionTree:\n\n def __init__(self, data, validation_data=None, gain_function=entropy, max_depth=40):\n self.max_depth = max_depth\n self.root = Node()\n self.root.info['cost'] = 0\n self.gain_function = gain_function\n self.majority_label = self.m_label(data) # holds the most common label in the whole dataset\n\n indices = list(range(1, len(data[0])))\n\n self._split_recurs(self.root, data, indices)\n\n # Pruning\n if not (validation_data is None):\n self._prune_recurs(self.root, validation_data)\n\n def m_label(self, data):\n self.majority_label = data[0][0]\n return self.calc_label(data)\n\n def predict(self, features):\n return self._predict_recurs(self.root, features)\n\n def accuracy(self, data):\n return 1 - self.loss(data)\n\n def loss(self, data):\n cnt = 0.0\n test_Y = [row[0] for row in data]\n for i in range(len(data)):\n prediction = self.predict(data[i])\n if (prediction != test_Y[i]):\n cnt += 1.0\n return cnt/len(data)\n\n def _predict_recurs(self, node, row):\n if node.isleaf or node.index_split_on == 0:\n return node.label\n split_index = node.index_split_on\n if row[split_index]:\n return self._predict_recurs(node.left, row)\n else:\n return self._predict_recurs(node.right, row)\n\n def _prune_recurs(self, node, validation_data):\n if node.isleaf:\n return\n\n self._prune_recurs(node.left, validation_data)\n self._prune_recurs(node.right, validation_data)\n\n current_loss = self.loss(validation_data)\n if node.left.isleaf and node.right.isleaf:\n node.isleaf = True\n node.label = node.info['predicted']\n if self.loss(validation_data) >= current_loss:\n node.isleaf = False\n node.label = -1\n else:\n node.left = None\n node.right = None\n\n def _is_terminal(self, node, data, indices):\n if (\n len(data) == 0\n or len(indices) == 0\n or self.same_class(data)\n or node.depth > self.max_depth\n ):\n return (True, self.calc_label(data))\n\n return (False, -1)\n\n def calc_label(self, data):\n labels = [item[0] for item in data]\n # If we have no label, return the most likely label for the set\n if len(labels) == 0:\n return self.majority_label\n # Otherwise return the most likely label for this branch\n return max(set(labels), key=labels.count)\n\n def 
same_class(self, data):\n labels = [item[0] for item in data]\n if len(labels) == 0:\n return True\n first = labels[0]\n for data_item in labels:\n if data_item == first:\n continue\n else:\n return False\n return True\n\n def _split_recurs(self, node, rows, indices):\n terminal = self._is_terminal(node, rows, indices)\n node.isleaf = terminal[0]\n node.label = terminal[1]\n node.info['data_size'] = len(rows)\n\n if terminal[0]:\n return\n\n node.info['predicted'] = self.calc_label(rows)\n split_index = indices[0]\n max_gain = 0\n split_pos = 0\n\n for i in range(0, len(indices)):\n gain = self._calc_gain(rows, indices[i], self.gain_function)\n # print(\"gain choice \", gain)\n if gain > max_gain:\n split_index = indices[i]\n max_gain = gain\n split_pos = i\n # print(\"gain chosen \", max_gain)\n\n node.index_split_on = split_index\n del indices[split_pos]\n\n split_on_true = [data for data in rows if data[split_index]]\n split_on_false = [data for data in rows if not data[split_index]]\n\n node.right = Node()\n node.right.info['cost'] = max_gain\n node.right.depth = node.depth + 1\n\n node.left = Node()\n node.left.info['cost'] = max_gain\n node.left.depth = node.depth + 1\n\n # print(len(split_on_true), \" \", len(split_on_false))\n\n self._split_recurs(node.left, split_on_true, indices[:])\n self._split_recurs(node.right, split_on_false, indices[:])\n\n def _calc_gain(self, data, split_index, gain_function):\n split_on_true = [data_point for data_point in data if data_point[split_index]]\n split_on_false = [data_point for data_point in data if not data_point[split_index]]\n\n return gain_function(data) - (\n len(split_on_true)/len(data) * gain_function(split_on_true) +\n len(split_on_false)/len(data) * gain_function(split_on_false)\n )\n\n def loss_plot_vec(self, data):\n self._loss_plot_recurs(self.root, data, 0)\n loss_vec = []\n q = [self.root]\n num_correct = 0\n while len(q) > 0:\n node = q.pop(0)\n num_correct = num_correct + node.info['curr_num_correct']\n loss_vec.append(num_correct)\n if node.left is not None:\n q.append(node.left)\n if node.right is not None:\n q.append(node.right)\n\n return 1 - np.array(loss_vec)/len(data)\n\n def _loss_plot_recurs(self, node, rows, prev_num_correct):\n labels = [row[0] for row in rows]\n curr_num_correct = labels.count(node.label) - prev_num_correct\n node.info['curr_num_correct'] = curr_num_correct\n\n if not node.isleaf:\n left_data, right_data = [], []\n left_num_correct, right_num_correct = 0, 0\n for row in rows:\n if row[node.index_split_on]:\n left_data.append(row)\n else:\n right_data.append(row)\n\n left_labels = [row[0] for row in left_data]\n left_num_correct = left_labels.count(node.label)\n right_labels = [row[0] for row in right_data]\n right_num_correct = right_labels.count(node.label)\n\n if node.left is not None:\n self._loss_plot_recurs(node.left, left_data, left_num_correct)\n if node.right is not None:\n self._loss_plot_recurs(node.right, right_data, right_num_correct)\n","sub_path":"decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":7495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"452711859","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom library.aux import try_save_fig\n\nTARGET_SP_RADIUS = 0.95 # before 0.9\nINSTANCES = 100\nORTHOPROCESS_ITERATIONS = 100\nrho = TARGET_SP_RADIUS\ntau = 0.01 # previously 0.001\neta = 3*10**-2 # learning rate, 1*10**-1 je uz privela\nreservoir_sizes = list(range(100, 1000 
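# Hypothetical usage sketch for the DecisionTree class defined in this record;
# the module name `decision_tree` and the toy data are assumptions, but the
# constructor signature (data, validation_data, gain_function, max_depth) and
# the row layout [label, feat1, feat2, ...] match the code above.
from decision_tree import DecisionTree, gini_index  # assumed import path

train = [[1, 1, 0], [1, 1, 1], [0, 0, 0], [0, 0, 1]]  # label tracks feature 1
tree = DecisionTree(train, gain_function=gini_index, max_depth=5)
print(tree.predict([None, 1, 0]))  # 1: feature 1 is set
print(tree.accuracy(train))        # 1.0 on this separable toy set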
+ 1, 100)) # [16, 25, 64, 100]\n# reservoir_sizes = list(range(10, 100 + 1, 10)) # [16, 25, 64, 100]\n\nmcb_mean = np.load('mcbm.npy')\nmcb_std = np.load('mcbs.npy')\nleb_mean = np.load('lbm.npy')\nleb_std = np.load('lbs.npy')\n\nmca_mean = np.load('mcam.npy')\nmca_std = np.load('mcas.npy')\nlea_mean = np.load('lam.npy')\nlea_std = np.load('las.npy')\n\n\ndef replot():\n # res_size vs mc\n ax1 = plt.subplot(211)\n ax1.plot(reservoir_sizes, reservoir_sizes, 'k', linestyle=\":\")\n ax1.errorbar(reservoir_sizes, mcb_mean, yerr=mcb_std, label=\"before \", fmt=\"--\")\n ax1.errorbar(reservoir_sizes, mca_mean, yerr=mca_std, label=\"after\")\n plt.ylabel(\"MC\")\n plt.title(\"ON method\")\n plt.grid(True)\n\n ax2 = plt.subplot(212)\n ax2.errorbar(reservoir_sizes, leb_mean, yerr=leb_std, label=\"before \", fmt=\"--\")\n ax2.errorbar(reservoir_sizes, lea_mean, yerr=lea_std, label=\"after\")\n plt.ylabel(\"LE\")\n plt.grid(True)\n plt.legend(loc=4)\n plt.xlabel(\"reservoir size\")\n plt.yticks(np.arange(0.0, -0.11, -0.02))\n\n try_save_fig(\"figures/figure\")\n try_save_fig(\"figures/figure\", ext=\"pdf\")\n plt.show()\n\nreplot()\n","sub_path":"v2/7. on_n_vs_mc_le/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"568786624","text":"\"\"\"\nWraps sklearn SVM classes to allow\nfor set kernels\n\"\"\"\nfrom numpy import matrix, vstack, hstack\nimport numpy as np\nimport scipy.sparse as sp\nfrom scipy.spatial.distance import cdist\nfrom scipy.optimize import fmin_bfgs as fmin\nimport math\n\nfrom progress import ProgressMonitor\n\nMEM_LIMIT = 1024*1024*1024 # 1GB\n\nclass SetSVM(object):\n\n def __init__(self, estimator_class, set_kernel, **kwargs):\n self.set_kernel = _by_name(set_kernel)\n self.estimator = estimator_class(**kwargs)\n\n def fit(self, X, y):\n X = map(np.asmatrix, X)\n self.fit_data = X\n gram_matrix = self.set_kernel(X, X)\n self.estimator.fit(gram_matrix, y)\n return self\n\n def predict(self, X):\n gram_matrix = self.set_kernel(X, self.fit_data)\n return self.estimator.predict(gram_matrix)\n\n def decision_function(self, X):\n gram_matrix = self.set_kernel(X, self.fit_data)\n return self.estimator.decision_function(gram_matrix)\n\ndef _by_name(full_name):\n parts = full_name.split('_')\n name = parts.pop(0)\n\n try:\n # See if second part is a number\n value = float(parts[0])\n parts.pop(0)\n except: pass\n\n if name == 'linear':\n kernel = linear\n elif name == 'quadratic':\n kernel = quadratic\n elif name == 'p':\n kernel = polynomial(int(value))\n elif name == 'rbf':\n kernel = rbf(value)\n else:\n raise ValueError('Unknown Kernel type %s' % name)\n\n try:\n # See if remaining part is a norm\n norm_name = parts.pop(0)\n if norm_name == 'fs':\n norm = featurespace_norm\n elif norm_name == 'av':\n norm = averaging_norm\n elif norm_name == 'md':\n norm = 'median'\n else:\n raise ValueError('Unknown norm %s' % norm_name)\n except IndexError:\n norm = no_norm\n\n kernel_function = set_kernel(kernel, norm)\n kernel_function.name = full_name\n return kernel_function\n\ndef averaging_norm(x, *args):\n return float(x.shape[0])\n\ndef featurespace_norm(x, k):\n return math.sqrt(np.sum(k(x, x)))\n\ndef no_norm(x, k):\n return 1.0\n\ndef _prog(plist):\n progress = ProgressMonitor(total=len(plist), print_interval=1,\n msg='Constructing Kernel')\n for p in plist:\n yield p\n progress.increment()\n\ndef set_kernel(k, normalizer=no_norm):\n \"\"\"\n Decorator that makes 
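# replot above draws mean curves with std-dev error bars from the saved .npy
# arrays; a minimal self-contained errorbar sketch with synthetic stand-ins
# for mcam.npy etc., written to a file so it runs headless:
import numpy as np
import matplotlib
matplotlib.use('Agg')                      # headless backend for the sketch
import matplotlib.pyplot as plt

sizes = np.arange(100, 1001, 100)
mean = sizes * 0.8                         # synthetic memory-capacity means
std = np.sqrt(sizes)                       # synthetic standard deviations
plt.errorbar(sizes, mean, yerr=std, label='after')
plt.xlabel('reservoir size')
plt.ylabel('MC')
plt.legend()
plt.savefig('errorbar_sketch.png')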
a normalized\n set kernel out of a standard kernel k\n \"\"\"\n # Check special case\n # (kind of a hack; make it better eventually)\n if normalizer == 'median':\n return median_kernel(k)\n\n def K(X, Y):\n if type(X) == list:\n norm = lambda x: normalizer(x, k)\n xinst = sum(map(len, X))\n yinst = sum(map(len, Y))\n if xinst*yinst*8 >= MEM_LIMIT:\n x_norm = matrix(map(norm, X))\n y_norm = matrix(map(norm, Y))\n norms = x_norm.T*y_norm\n raw_kernel = np.array([[np.sum(k(x,y)) for y in Y]\n for x in _prog(X)])\n else:\n x_norm = matrix(map(norm, X))\n if id(X) == id(Y):\n # Optimize for symmetric case\n norms = x_norm.T*x_norm\n if all(len(bag) == 1 for bag in X):\n # Optimize for singleton bags\n instX = vstack(X)\n raw_kernel = k(instX, instX)\n else:\n # Only need to compute half of\n # the matrix if it's symmetric\n upper = matrix([i*[0] + [np.sum(k(x, y))\n for y in Y[i:]]\n for i, x in enumerate(X, 1)])\n diag = np.array([np.sum(k(x, x)) for x in X])\n raw_kernel = upper + upper.T + spdiag(diag)\n else:\n y_norm = matrix(map(norm, Y))\n norms = x_norm.T*y_norm\n raw_kernel = k(vstack(X), vstack(Y))\n lensX = map(len, X)\n lensY = map(len, Y)\n if any(l != 1 for l in lensX):\n raw_kernel = vstack([np.sum(raw_kernel[i:j, :], axis=0)\n for i, j in slices(lensX)])\n if any(l != 1 for l in lensY):\n raw_kernel = hstack([np.sum(raw_kernel[:, i:j], axis=1)\n for i, j in slices(lensY)])\n return np.divide(raw_kernel, norms)\n else:\n return k(X, Y)\n return K\n\ndef linear(x, y):\n \"\"\"Linear kernel x'*y\"\"\"\n return np.dot(x, y.T)\n\ndef quadratic(x, y):\n \"\"\"Quadratic kernel (1 + x'*y)^2\"\"\"\n return np.square(1e0 + np.dot(x,y.T))\n\ndef polynomial(p):\n \"\"\"General polynomial kernel (1 + x'*y)^p\"\"\"\n def p_kernel(x, y):\n return np.power(1e0 + np.dot(x,y.T), p)\n return p_kernel\n\ndef rbf(gamma):\n \"\"\"Radial Basis Function\"\"\"\n def rbf_kernel(x, y):\n return matrix(np.exp(-gamma*cdist(x, y, 'sqeuclidean')))\n return rbf_kernel\n\ndef slices(groups):\n \"\"\"\n Generate slices to select\n groups of the given sizes\n within a list/matrix\n \"\"\"\n i = 0\n for group in groups:\n yield i, i + group\n i += group\n\ndef spdiag(x):\n n = len(x)\n return sp.spdiags(x.flat, [0], n, n)\n\ndef median_weight(k, X):\n n = len(X)\n K = k(X, X)\n a0 = np.ones(n) / float(n)\n if n <= 2:\n return a0\n\n def distances(a):\n a = np.asarray(a)\n sq_dists = np.array([float(K[i, i] - 2 * np.dot(K[i, :], a) + np.dot(np.dot(a.T, K), a))\n for i in range(n)])\n return np.sqrt(sq_dists)\n\n def f(a):\n return np.sum(distances(a))\n\n def grad(a):\n a = np.asarray(a)\n dists = distances(a)\n g = sum((np.dot(K, a) - K[i, :]) / dists[i]\n for i in range(n) if dists[i] != 0)\n if type(g) == int:\n return np.zeros(a.shape)\n return np.array(list(g.flat))\n\n astar = fmin(f, a0, fprime=grad, disp=0)\n astar = np.asarray(astar)\n if np.any(np.isnan(astar)):\n return a0\n else:\n return astar\n\ndef median_kernel(k):\n \"\"\"\n Makes a \"median kernel\" out of instance kernel k\n \"\"\"\n def make_weights(X):\n n = len(X)\n prog = ProgressMonitor(total=n, print_interval=1,\n msg='Constructing Kernel')\n ws = []\n for x in X:\n prog.increment()\n ws.append(median_weight(k, x))\n return ws\n\n def K(X, Y):\n X_meds = make_weights(X)\n if id(X) == id(Y):\n Y_meds = X_meds\n else:\n Y_meds = make_weights(Y)\n kernel = np.array([[float(np.dot(np.dot(a.T,k(x, y)),b))\n for y, b in zip(Y, Y_meds)]\n for x, a in zip(X, X_meds)])\n return kernel\n return 
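# Minimal numpy illustration of the normalized set-kernel value that K above
# computes for one pair of bags: the instance kernel summed over all cross
# pairs, divided by the product of the per-bag normalizers. Shown with the
# linear instance kernel and the averaging norm (bag size):
import numpy as np

X = np.array([[1.0, 0.0], [0.0, 1.0]])    # bag with 2 instances
Y = np.array([[1.0, 1.0]])                # bag with 1 instance

raw = np.dot(X, Y.T).sum()                # sum_{x in X, y in Y} <x, y>
k_norm = raw / (len(X) * len(Y))          # averaging_norm: divide by bag sizes
print(k_norm)                             # 1.0  ( (1 + 1) / (2 * 1) )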
K\n","sub_path":"smile/active_learning/src/set_svm.py","file_name":"set_svm.py","file_ext":"py","file_size_in_byte":7090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"516774219","text":"'''\nCreated on Feb. 8, 2020\n\n@author: mferl\n'''\n\n# Imports\nfrom machine import Pin\nimport time\n\n###################### Global Variables ######################\n# Outputs\nC1 = Pin(13,Pin.OUT)\nC2 = Pin(12,Pin.OUT)\nC3 = Pin(14,Pin.OUT)\n\n# Inputs\nR1 = Pin(2,Pin.IN,Pin.PULL_UP)\nR2 = Pin(0,Pin.IN,Pin.PULL_UP)\nR3 = Pin(4,Pin.IN,Pin.PULL_UP)\nR4 = Pin(5,Pin.IN,Pin.PULL_UP)\n\n# Other Variables \nkeys = ['1','4','7','*','2','5','8','0', '3','6','9','#']\n#############################################################\n\n# scanKeypad()\ndef scanKeypad(kIndex, inputRowsList):\n # Check bits in each row\n for rowBit in inputRowsList:\n # Found key pressed\n if rowBit == 0:\n return(keys[kIndex])\n \n # Increment\n kIndex += 1 \n \n # Returns -1 if no key was pressed\n return -1\n\n######################## Main Method ########################\ndef AppEntryPoint():\n # Continually Scan\n while True:\n # Set Counter\n kIndex = 0\n\n ########### Scan the LEFT column ###########\n C1(0)\n C2(1)\n C3(1) \n keyPressed = scanKeypad(kIndex, [R1(), R2(), R3(), R4()])\n \n # Output\n if keyPressed != -1:\n print(keyPressed)\n \n # Increment\n kIndex += 4 \n \n ########### Scan the MIDDLE column ###########\n C1(1)\n C2(0)\n C3(1) \n keyPressed = scanKeypad(kIndex, [R1(), R2(), R3(), R4()])\n \n # Output\n if keyPressed != -1:\n print(keyPressed)\n \n # Increment\n kIndex += 4 \n \n ########### Scan the RIGHT column ###########\n C1(1)\n C2(1)\n C3(0) \n keyPressed = scanKeypad(kIndex, [R1(), R2(), R3(), R4()])\n \n # Output\n if keyPressed != -1:\n print(keyPressed)\n \n return \n\n# Call AppEntryPoint()\nAppEntryPoint()","sub_path":"Session 1/Code/ScanningKeypads.py","file_name":"ScanningKeypads.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"34275492","text":"import csv\nimport numpy as np\nfrom pomegranate import *\nimport matplotlib.pyplot as plt\n\ninput1 = csv.reader(open('datos/1-path.csv','r'))\ninput2 = csv.reader(open('datos/2-path.csv','r'))\ninput3 = csv.reader(open('datos/3-path.csv','r'))\ninput4 = csv.reader(open('datos/4-path.csv','r'))\ninput5 = csv.reader(open('datos/5-path.csv','r'))\n\npathTrain = []\npathTest = []\npathInt = []\npathDic = {}\ntypesTrain = []\ntypesTest = []\ntypesInt = []\ntypesDic = {'PAGE': 0, 'EVENT': 1}\nlenTrain = []\nlenTest = []\nlenInt = []\n\nnvis1 = []\ni=0\ntempId = 0\n\nfor row in input1:\n\tif (i > 0):\n\t\tnvis1.append(row[1])\n\n\t\tif ((int(row[1])-int(nvis1[0])) < 200):\n\t\t\tif (row[6] == 'PAGE'):\n\t\t\t\tif (row[7] not in pathDic):\n\t\t\t\t\tpathDic[row[7]] = len(pathDic)\t\t\n\t\t\t\tpathTrain.append(pathDic[row[7]])\n\t\t\telif (row[6] == 'EVENT'):\n\t\t\t\tif (row[9] not in pathDic):\n\t\t\t\t\tpathDic[row[9]] = len(pathDic)\t\t\n\t\t\t\tpathTrain.append(pathDic[row[9]])\n\t\t\telse:\n\t\t\t\tprint('Nuevo tipo distinto')\n\t\t\t\tbreak\n\n\t\t\tif (tempId != row[0] ):\n\t\t\t\ttempId = row[0]\n\t\t\t\tlenTrain.append(int(row[3]))\n\n\t\t\ttypesTrain.append(typesDic[row[6]])\n\n\t\telse:\n\t\t\tif (row[6] == 'PAGE'):\n\t\t\t\tif (row[7] not in pathDic):\n\t\t\t\t\tpathDic[row[7]] = len(pathDic)\t\t\n\t\t\t\tpathTest.append(pathDic[row[7]])\n\t\t\telif (row[6] == 'EVENT'):\n\t\t\t\tif (row[9] not 
in pathDic):\n\t\t\t\t\tpathDic[row[9]] = len(pathDic)\t\t\n\t\t\t\tpathTest.append(pathDic[row[9]])\n\t\t\telse:\n\t\t\t\tprint('Nuevo tipo distinto')\n\t\t\t\tbreak\n\n\t\t\tif (tempId != row[0] ):\n\t\t\t\ttempId = row[0]\n\t\t\t\tlenTest.append(int(row[3]))\n\n\t\t\ttypesTest.append(typesDic[row[6]])\n\n\ti+=1\n\nnvis1.clear()\ni=0\ntempId = 0\n\nfor row in input2:\n\tif (i > 0):\n\t\tnvis1.append(row[1])\n\n\t\tif ((int(row[1])-int(nvis1[0])) < 50):\n\t\t\tif (row[6] == 'PAGE'):\n\t\t\t\tif (row[7] not in pathDic):\n\t\t\t\t\tpathDic[row[7]] = len(pathDic)\t\t\n\t\t\t\tpathInt.append(pathDic[row[7]])\n\t\t\telif (row[6] == 'EVENT'):\n\t\t\t\tif (row[9] not in pathDic):\n\t\t\t\t\tpathDic[row[9]] = len(pathDic)\t\t\n\t\t\t\tpathInt.append(pathDic[row[9]])\n\t\t\telse:\n\t\t\t\tprint('Nuevo tipo distinto')\n\t\t\t\tbreak\n\n\t\t\tif (tempId != row[0] ):\n\t\t\t\ttempId = row[0]\n\t\t\t\tlenInt.append(int(row[3]))\n\n\t\t\ttypesInt.append(typesDic[row[6]])\n\n\t\telse:\n\t\t\tbreak\n\ti+=1\n\nnvis1.clear()\ni=0\ntempId = 0\n\nfor row in input3:\n\tif (i > 0):\n\t\tnvis1.append(row[1])\n\n\t\tif ((int(row[1])-int(nvis1[0])) < 50):\n\t\t\tif (row[6] == 'PAGE'):\n\t\t\t\tif (row[7] not in pathDic):\n\t\t\t\t\tpathDic[row[7]] = len(pathDic)\t\t\n\t\t\t\tpathInt.append(pathDic[row[7]])\n\t\t\telif (row[6] == 'EVENT'):\n\t\t\t\tif (row[9] not in pathDic):\n\t\t\t\t\tpathDic[row[9]] = len(pathDic)\t\t\n\t\t\t\tpathInt.append(pathDic[row[9]])\n\t\t\telse:\n\t\t\t\tprint('Nuevo tipo distinto')\n\t\t\t\tbreak\n\n\t\t\tif (tempId != row[0] ):\n\t\t\t\ttempId = row[0]\n\t\t\t\tlenInt.append(int(row[3]))\n\n\t\t\ttypesInt.append(typesDic[row[6]])\n\n\t\telse:\n\t\t\tbreak\n\n\ti+=1\n\nnvis1.clear()\ni=0\ntempId = 0\n\nfor row in input4:\n\tif (i > 0):\n\t\tnvis1.append(row[1])\n\n\t\tif ((int(row[1])-int(nvis1[0])) < 50):\n\t\t\tif (row[6] == 'PAGE'):\n\t\t\t\tif (row[7] not in pathDic):\n\t\t\t\t\tpathDic[row[7]] = len(pathDic)\t\t\n\t\t\t\tpathInt.append(pathDic[row[7]])\n\t\t\telif (row[6] == 'EVENT'):\n\t\t\t\tif (row[9] not in pathDic):\n\t\t\t\t\tpathDic[row[9]] = len(pathDic)\t\t\n\t\t\t\tpathInt.append(pathDic[row[9]])\n\t\t\telse:\n\t\t\t\tprint('Nuevo tipo distinto')\n\t\t\t\tbreak\n\n\t\t\tif (tempId != row[0] ):\n\t\t\t\ttempId = row[0]\n\t\t\t\tlenInt.append(int(row[3]))\n\n\t\t\ttypesInt.append(typesDic[row[6]])\n\n\t\telse:\n\t\t\tbreak\n\n\ti+=1\n\nnvis1.clear()\ni=0\ntempId = 0\n\nfor row in input5:\n\tif (i > 0):\n\t\tnvis1.append(row[1])\n\n\t\tif ((int(row[1])-int(nvis1[0])) < 50):\n\t\t\tif (row[6] == 'PAGE'):\n\t\t\t\tif (row[7] not in pathDic):\n\t\t\t\t\tpathDic[row[7]] = len(pathDic)\t\t\n\t\t\t\tpathInt.append(pathDic[row[7]])\n\t\t\telif (row[6] == 'EVENT'):\n\t\t\t\tif (row[9] not in pathDic):\n\t\t\t\t\tpathDic[row[9]] = len(pathDic)\t\t\n\t\t\t\tpathInt.append(pathDic[row[9]])\n\t\t\telse:\n\t\t\t\tprint('Nuevo tipo distinto')\n\t\t\t\tbreak\n\n\t\t\tif (tempId != row[0] ):\n\t\t\t\ttempId = row[0]\n\t\t\t\tlenInt.append(int(row[3]))\n\n\t\t\ttypesInt.append(typesDic[row[6]])\n\n\t\telse:\n\t\t\tbreak\n\n\ti+=1\n\n#print(lenTrain)\n#print(lenTest)\nseq = np.array((typesTrain,pathTrain))\n#print(seq)\n\nprob = [0.25, 0.5, 0.75]\n\nfor h in range (1,18):\n\n\tfor l in prob:\n\n\t\tfor k in prob:\n\n\t\t\t#model = HiddenMarkovModel.from_samples(DiscreteDistribution, n_components=5, X=seq)\n\t\t\tmodel = HiddenMarkovModel.from_samples(NormalDistribution, n_components=h, X=seq, algorithm='baum-welch', edge_inertia=l, 
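# The five CSV-reading loops in this record differ only in window size and
# destination lists; a sketch of one way to factor out the shared body
# (hypothetical helper; field layout as above: row[0]=visit id,
# row[1]=timestamp, row[3]=session length, row[6]=type, row[7]/row[9]=page/event;
# unlike the first loop's train/test split, this simply stops at the window
# edge, as loops 2 through 5 do):
def collect_path(reader, window, path_dic, types_dic, paths, types, lens):
    first_ts = None
    temp_id = None
    for i, row in enumerate(reader):
        if i == 0:
            continue                                  # skip the CSV header
        if first_ts is None:
            first_ts = int(row[1])
        if int(row[1]) - first_ts >= window:
            break
        key = row[7] if row[6] == 'PAGE' else row[9]  # PAGE url or EVENT name
        path_dic.setdefault(key, len(path_dic))
        paths.append(path_dic[key])
        if temp_id != row[0]:                         # a new visit id starts
            temp_id = row[0]
            lens.append(int(row[3]))
        types.append(types_dic[row[6]])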
distribution_inertia=k)\n\t\t\tmodel.bake()\n\t\t\t#print(model.viterbi(np.array((types[0:3926],path[0:3926]))))\n\t\t\tcounterTrain = 0\n\t\t\tscores_Train = []\n\n\t\t\tfor x in lenTrain:\n\t\t\t\tif (int(x) > 1):\n\t\t\t\t\tscoreTrain = model.log_probability(np.array((typesTrain[counterTrain:counterTrain+x-1],pathTrain[counterTrain:counterTrain+x-1])))\n\t\t\t\t\tscores_Train.append(scoreTrain)\n\t\t\t\tcounterTrain += x\n\n\t\t\tcounterTest = 0\n\t\t\tscores_Test = []\n\n\t\t\tfor x in lenTest:\n\t\t\t\tif (int(x) > 1):\n\t\t\t\t\tscoreTest = model.log_probability(np.array((typesTest[counterTest:counterTest+x-1],pathTest[counterTest:counterTest+x-1])))\n\t\t\t\t\tscores_Test.append(scoreTest)\n\t\t\t\tcounterTest += x\n\n\t\t\tcounterInt = 0\n\t\t\tscores_Int = []\n\n\t\t\tfor x in lenInt:\n\t\t\t\tif (int(x) > 1):\n\t\t\t\t\tscoreInt = model.log_probability(np.array((typesInt[counterInt:counterInt+x-1],pathInt[counterInt:counterInt+x-1])))\n\t\t\t\t\tscores_Int.append(scoreInt)\n\t\t\t\tcounterInt += x\n\n\t\t\tlength_train = len(scores_Train)\n\t\t\tlength_val = len(scores_Test) + length_train\n\t\t\tlength_int = len(scores_Int) + length_val\n\n\t\t\tplt.figure(figsize=(9,7))\n\t\t\tplt.scatter(np.arange(length_train), scores_Train, c='b', label='trainset')\n\t\t\tplt.scatter(np.arange(length_train, length_val), scores_Test, c='r', label='testset - original')\n\t\t\tplt.scatter(np.arange(length_val, length_int), scores_Int, c='g', label='testset - intruso')\n\t\t\tplt.title(label=\"N comp: \"+str(h)+\" Edge:\"+str(l)+\" Distribution:\"+str(k))\n\t\t\tplt.savefig(\"img/pomeConInercia_comp\"+str(h)+\"_edge\"+str(l)+\"Dist\"+str(k)+\".png\")\n\t\t\tplt.close()\n#plt.show()\n#print(model.log_probability(np.array((typesTest,pathTest))))\n\n\n","sub_path":"granate2.py","file_name":"granate2.py","file_ext":"py","file_size_in_byte":5876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"35575533","text":"#! 
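# The three scoring loops above share one pattern: walk a flat observation
# array with a list of per-session lengths (note they slice
# counter:counter+x-1, dropping each session's final observation). The
# slicing pattern in isolation, with hypothetical data:
def iter_sessions(flat, lengths):
    start = 0
    for n in lengths:
        yield flat[start:start + n]
        start += n

flat = [10, 11, 12, 20, 21]
print(list(iter_sessions(flat, [3, 2])))   # [[10, 11, 12], [20, 21]]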
/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2015 jaidev \n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\nExample showing a Unterberger distribution of a hyperbolic group delay signal.\n\"\"\"\n\nfrom tftb.processing import unterberger\nfrom tftb.generators import gdpower\nimport numpy as np\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport matplotlib.pyplot as plt\n\nsig = gdpower(128, -1)[0]\ntfr, t, f = unterberger(sig, fmin=0.01, fmax=0.22, n_voices=172)\ntfr = np.abs(tfr) ** 2\nthreshold = np.amax(tfr) * 0.05\ntfr[tfr <= threshold] = 0.0\nt, f = np.meshgrid(t, f)\n\nfig, axContour = plt.subplots()\naxContour.contour(t, f, tfr)\naxContour.grid(True)\naxContour.set_title(\"Unterberger distribution of hyperbolic GD signal.\")\naxContour.set_ylabel('Frequency')\naxContour.set_xlabel('Time')\n\ndivider = make_axes_locatable(axContour)\naxTime = divider.append_axes(\"top\", 1.2, pad=0.5)\naxFreq = divider.append_axes(\"left\", 1.2, pad=0.5)\naxTime.plot(np.real(sig))\naxTime.set_xlim(0, 128)\naxTime.set_ylabel('Real part')\naxTime.set_title('Signal in time')\naxTime.grid(True)\naxFreq.plot((abs(np.fft.fftshift(np.fft.fft(sig))) ** 2)[::-1][:64],\n np.arange(sig.shape[0] / 2))\naxFreq.set_ylabel('Spectrum')\naxFreq.grid(True)\nplt.show()\nplt.show()\n","sub_path":"doc/_gallery/noplot/noplot_4_2_2_untberger_hyperbolic_gd.py","file_name":"noplot_4_2_2_untberger_hyperbolic_gd.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"1916666","text":"import tensorflow as tf\nimport numpy as np\nfrom utils import bins2ab, random_mini_batches, annealed_mean\nimport os\n\nclass train_evaluate:\n def __init__(self, params, model, weights_file = None, model_type = 'classification'):\n # params: hyperparameter\n # model: Network model \n self.params = params\n self.weights_file = weights_file\n self.model_type = model_type\n self.model = self.build_model(model)\n\n def build_model(self, model):\n with tf.variable_scope('model', reuse = False):\n model = model(self.params, is_training = True) #batch norm\n return model\n\n def restoreSession(self, last_saver, sess, restore_from, is_training):\n # Restore sess, cost from last training\n begin_at_epoch = 0\n costs = []\n dev_costs = []\n best_dev_accuracy = float('-inf')\n dev_accuracies = []\n train_accuracies = []\n if restore_from is not None:\n if os.path.isdir(restore_from):\n sess_path = tf.train.latest_checkpoint(restore_from)\n begin_at_epoch = int(sess_path.split('-')[-1])\n last_saver.restore(sess, sess_path)\n \n if is_training:\n costs = np.load(os.path.join(restore_from, \"costs.npy\")).tolist()\n dev_costs = np.load(os.path.join(restore_from, \"dev_costs.npy\")).tolist()\n dev_accuracies = np.load(os.path.join(restore_from, \"dev_accuracies.npy\")).tolist()\n train_accuracies = np.load(os.path.join(restore_from, \"train_accuracies.npy\")).tolist()\n best_dev_accuracy = np.load(os.path.join(restore_from,\"best_dev_accuracy.npy\"))[0]\n\n return begin_at_epoch, costs, dev_costs, best_dev_accuracy, dev_accuracies, train_accuracies\n\n def train(self, X_train, Y_train, X_dev, Y_dev, model_dir, restore_from = None, print_cost = True):\n m = X_train.shape[0]\n \n model = self.model\n accuracy = model.accuracy\n cost = model.cost\n\n optimizer = tf.train.AdamOptimizer(self.params.learning_rate).minimize(cost)\n\n last_saver = tf.train.Saver(max_to_keep = 1)\n best_saver = 
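# The gallery script above suppresses everything below 5% of the peak before
# contouring the distribution; the same boolean-mask thresholding in
# isolation, with a small numpy array:
import numpy as np

tfr = np.array([[0.01, 0.5], [1.0, 0.02]])
threshold = np.amax(tfr) * 0.05           # 5% of the maximum
tfr[tfr <= threshold] = 0.0               # in-place masked assignment
print(tfr)                                # [[0.  0.5] [1.  0. ]]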
tf.train.Saver(max_to_keep = 1)\n \n with tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n\n if (self.weights_file is not None) and (restore_from is None):\n model.load_weights(self.weights_file, sess)\n \n begin_at_epoch, costs, dev_costs, best_dev_accuracy, dev_accuracies, train_accuracies = self.restoreSession(last_saver, sess, restore_from, is_training = True)\n \n for epoch in range(self.params.num_epochs):\n count_batch = 0\n print (\"epoch: \", epoch + 1)\n minibatch_cost = 0.\n minibatch_accuracy = 0.\n num_minibatches = (m + self.params.train_batch_size - 1) // self.params.train_batch_size\n\n minibatches = random_mini_batches(X_train, Y_train, self.params.train_batch_size)\n \n for minibatch in minibatches:\n # Select a minibatch\n (minibatch_X, minibatch_Y) = minibatch\n _ , temp_cost, temp_accuracy = sess.run([optimizer, cost, accuracy], feed_dict={model.X: minibatch_X, model.Y: minibatch_Y})\n \n # compute training cost\n minibatch_cost += temp_cost / num_minibatches\n minibatch_accuracy += temp_accuracy / num_minibatches\n\n # Print result\n if (count_batch % 10) == 0:\n print(\"count_batch\",count_batch,\"temp_cost:\", temp_cost, \"temp_accuracy:\", temp_accuracy)\n count_batch += 1\n \n costs.append(minibatch_cost) \n \n # compute dev cost\n dev_cost, dev_accuracy = self.evaluate(X_dev, Y_dev, sess)\n dev_costs.append(dev_cost)\n dev_accuracies.append(dev_accuracy)\n train_accuracies.append(minibatch_accuracy)\n\n if print_cost == True and epoch % 1 == 0:\n print (\"Cost after epoch %i: %f\" % (begin_at_epoch + epoch + 1, minibatch_cost)) \n print (\"Accuracy after epoch %i: %f\" % (begin_at_epoch + epoch + 1, minibatch_accuracy)) \n print (\"dev_Cost after epoch %i: %f\" % (begin_at_epoch + epoch + 1, dev_cost))\n print (\"dev_accuracy after epoch %i: %f\" % (begin_at_epoch + epoch + 1, dev_accuracy))\n \n # Save best sess\n if dev_accuracy > best_dev_accuracy:\n best_dev_accuracy = dev_accuracy\n best_save_path = os.path.join(model_dir, 'best_weights', 'after-epoch')\n best_saver.save(sess, best_save_path, global_step = begin_at_epoch + epoch + 1)\n if not (os.path.exists(os.path.join(model_dir,'last_weights'))):\n os.makedirs(os.path.join(model_dir,'last_weights'))\n np.save(os.path.join(model_dir,'last_weights', \"best_dev_accuracy\"), [best_dev_accuracy])\n\n # Save sess and costs\n last_save_path = os.path.join(model_dir, 'last_weights', 'after-epoch')\n last_saver.save(sess, last_save_path, global_step = begin_at_epoch + epoch + 1)\n np.save(os.path.join(model_dir,'last_weights', \"costs\"), costs)\n np.save(os.path.join(model_dir,'last_weights', \"dev_costs\"), dev_costs) \n np.save(os.path.join(model_dir,'last_weights', \"dev_accuracies\"), dev_accuracies)\n np.save(os.path.join(model_dir,'last_weights', \"train_accuracies\"), train_accuracies)\n\n def evaluate(self, X_test, Y_test, sess):\n # Evaluate the dev set. 
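# The epoch loop above counts minibatches with the ceil-division idiom and
# averages by adding temp_cost / num_minibatches per batch, which weights a
# final partial batch the same as a full one; the arithmetic in isolation:
m, batch_size = 1050, 100
num_minibatches = (m + batch_size - 1) // batch_size   # ceil(m / batch_size)
print(num_minibatches)                                  # 11: ten full + one partial

batch_costs = [2.0] * 10 + [4.0]                        # hypothetical per-batch costs
print(sum(c / num_minibatches for c in batch_costs))    # unweighted mean, ~2.18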
Used inside a session.\n m = X_test.shape[0]\n model = self.model\n accuracy = model.accuracy\n logits = model.logits\n cost = model.cost \n\n minibatches = random_mini_batches(X_test, Y_test, self.params.test_batch_size)\n minibatch_cost = 0.\n minibatch_accuracy = 0.\n num_minibatches = (m + self.params.test_batch_size - 1) // self.params.test_batch_size\n\n count_batch=0\n for minibatch in minibatches:\n # Select a minibatch\n (minibatch_X, minibatch_Y) = minibatch\n temp_cost, temp_accuracy, predictions = sess.run([cost, accuracy, model.predictions], feed_dict={model.X: minibatch_X, model.Y: minibatch_Y})\n # compute dev cost\n minibatch_cost += temp_cost / num_minibatches\n minibatch_accuracy += temp_accuracy / num_minibatches\n\n # Print result\n #if (count_batch % 10) == 0:\n print(\"dev_count_batch\",count_batch,\"dev_temp_cost:\", temp_cost, \"dev_temp_accuracy:\", temp_accuracy)\n count_batch += 1\n\n return minibatch_cost, minibatch_accuracy\n\n def predict(self, X_test, Y_test, restore_from):\n # Make prediction. Used outside a session.\n m = X_test.shape[0]\n model = self.model\n accuracy = model.accuracy\n logits = model.logits\n probs = model.probs\n cost = model.cost \n prediction = model.predictions\n\n last_saver = tf.train.Saver(max_to_keep = 1)\n with tf.Session() as sess:\n # init = tf.global_variables_initializer()\n # sess.run(init)\n if (self.weights_file is not None) and (restore_from is None):\n model.load_weights(self.weights_file, sess)\n \n self.restoreSession(last_saver, sess, restore_from, False)\n \n '''\n for i in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):\n print i.name\n if i.name == \"model/conv1_1/weights:0\":\n print sess.run(i)\n break\n '''\n\n # print \"weights:\", sess.run(model.parameters[0])\n predict_costs = np.zeros(m)\n predict_accuracy = np.zeros(m)\n predict_predictions = np.zeros(m)\n\n predict_logits = np.zeros((m, self.params.num_classes))\n predict_probs = np.zeros((m, self.params.num_classes))\n \n for i in range(m):\n predict_costs[i], predict_logits[i, :], predict_accuracy[i], predict_probs[i, :], predict_predictions[i] = sess.run([cost, logits, accuracy, probs, prediction], feed_dict={model.X: X_test[i:i+1], model.Y: Y_test[i:i+1]})\n\n return predict_costs, predict_logits, predict_accuracy, predict_probs, predict_predictions\n\n\n","sub_path":"model/train_evaluate.py","file_name":"train_evaluate.py","file_ext":"py","file_size_in_byte":8749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"61449158","text":"import ast\nimport os\nimport pandas as pd\n\n\ndata_folder = \"../../data\"\nnum_of_workpiece = 10\nworkpiece_list = [f\"wp_{idx + 1}\" for idx in range(num_of_workpiece)]\ninput_filename = \"input_1\"\nscore_type = \"score\"\nthreshold = 0.9\ninput_folder = f\"{data_folder}/raw/{input_filename}/dependent_qc/{score_type}/{num_of_workpiece}_workpiece\"\noutput_folder = f\"{data_folder}/postprocessed/dependent_qc/{score_type}/{num_of_workpiece}_workpiece/per_input\"\nif not os.path.exists(output_folder):\n # Create target Directory\n os.makedirs(output_folder)\n print(\"Directory \", output_folder, \" Created \")\nelse:\n print(\"Directory \", output_folder, \" already exists\")\n\nheader = {\n \"input_filename\": [],\n \"threshold\": [],\n \"TP\": [],\n \"TN\": [],\n \"FP\": [],\n \"FN\": [],\n \"production_time\": [],\n \"transport_time\": [],\n \"inspection_time\": [],\n \"network_time\": [],\n \"total_time\": [],\n \"accuracy\": [],\n \"miss\": [],\n \"precision\": 
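# The QC analysis starting here reduces everything to confusion-matrix counts
# (the TP/TN/FP/FN keys in the header dict above); its metric formulas as a
# standalone helper, evaluated on hypothetical counts:
def confusion_metrics(tp, tn, fp, fn):
    return {
        'accuracy': (tp + tn) / (tp + tn + fp + fn),
        'miss': fn / (tp + fn),              # miss rate, i.e. 1 - recall
        'precision': tp / (tp + fp),
        'recall': tp / (tp + fn),
        'F1': 2 * tp / (2 * tp + fp + fn),
    }

print(confusion_metrics(tp=8, tn=80, fp=2, fn=10))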
[],\n \"recall\": [],\n \"F1\": [],\n \"transport_time_norm\": [],\n \"inspection_time_norm\": [],\n \"network_time_norm\": [],\n \"total_time_norm\": [],\n}\nwriter = pd.ExcelWriter(f\"{output_folder}/dependent_qc_{score_type}_{num_of_workpiece}_workpiece_{input_filename}.xlsx\", mode=\"w\")\nfor filename in os.listdir(input_folder): # Each results\n if f\"{input_filename}\" in filename:\n additional_header = {\n f\"wc_{i + 1}\": []\n for i in range(17)\n }\n temp = header.copy()\n temp.update(additional_header)\n compiled_df = pd.DataFrame(temp)\n df = pd.read_csv(f\"{input_folder}/{filename}\")\n df = df.sort_values(by=['workpiece_id', 'time_step'])\n print(df.head())\n df.loc[:, 'actual_quality'] = df.loc[:, 'actual_quality'].ffill()\n df.loc[:, 'state'] = df.loc[:, 'state'].ffill()\n data = dict()\n consent_df = df[df[\"consent\"] == 1]\n # Compile qc score through row by row iteration\n for row in consent_df.itertuples():\n winner_qc_list = row.winner_qc_list\n winner_qc_list = ast.literal_eval(winner_qc_list)\n for qc, score in winner_qc_list:\n data[qc] = score # Keep overwriting the score until the last time the qc appear\n # Get confusion matrix data\n tp_df = consent_df[(consent_df[\"state\"].isin([\"['Fail']\", \"['Pass', 'Fail']\"]))\n & (consent_df[\"qc_results\"] == \"['Fail']\")]\n tn_df = consent_df[(consent_df[\"state\"] == \"['Pass']\") & (consent_df[\"qc_results\"] == \"['Pass']\")]\n fp_df = consent_df[(consent_df[\"state\"] == \"['Fail']\") & (consent_df[\"qc_results\"] == \"['Pass']\")]\n fn_df = consent_df[(consent_df[\"state\"] == \"['Pass']\") & (consent_df[\"qc_results\"] == \"['Fail']\")]\n data[\"input_filename\"] = input_filename\n data[\"threshold\"] = threshold\n data[\"TP\"] = len(tp_df)\n data[\"TN\"] = len(tn_df)\n data[\"FP\"] = len(fp_df)\n data[\"FN\"] = len(fn_df)\n data[\"production_time\"] = df.production_time.sum()\n data[\"transport_time\"] = df.transport_time.sum()\n data[\"inspection_time\"] = df.inspection_time.sum()\n max_time_step = df.time_step.max() # v the last step must be inspection.\n data[\"total_time\"] = max_time_step + df[df.time_step == max_time_step].inspection_time.values[0]\n # Normalizing network time, which is elapsed time per RESTapi request\n network_time_unit = df.loc[df[\"network_time\"].first_valid_index()][\"network_time\"] # first value\n data[\"network_time\"] = df.network_time.sum() / network_time_unit * 0.003 # each unit is 0.003 s\n # Get confusion matrix and norm time\n data[\"accuracy\"] = (data[\"TP\"] + data[\"TN\"]) / (data[\"TP\"] + data[\"TN\"] + data[\"FP\"] + data[\"FN\"])\n data['miss'] = data[\"FN\"] / (data[\"TP\"] + data[\"FN\"])\n data['precision'] = data[\"TP\"] / (data[\"TP\"] + data[\"FP\"])\n data['recall'] = data[\"TP\"] / (data[\"TP\"] + data[\"FN\"])\n data['F1'] = 2 * data[\"TP\"] / (2 * data[\"TP\"] + data[\"FP\"] + data[\"FN\"])\n data[\"transport_time_norm\"] = data[\"transport_time\"] / data[\"production_time\"]\n data[\"inspection_time_norm\"] = data[\"inspection_time\"] / data[\"production_time\"]\n data[\"network_time_norm\"] = data[\"network_time\"] / data[\"production_time\"]\n data[\"total_time_norm\"] = data[\"total_time\"] / data[\"production_time\"]\n # print(data)\n compiled_df = compiled_df.append(data, ignore_index=True)\n # print(compiled_df)\n df.to_excel(writer, sheet_name=\"raw\", index=False)\n compiled_df.to_excel(writer, sheet_name=\"processed\", 
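# Note: compiled_df.append(...) above relies on pandas.DataFrame.append,
# which was deprecated in pandas 1.4 and removed in 2.0; on current pandas
# the equivalent row-append goes through pd.concat:
import pandas as pd

compiled = pd.DataFrame({'TP': [], 'FP': []})
row = {'TP': 8, 'FP': 2}
compiled = pd.concat([compiled, pd.DataFrame([row])], ignore_index=True)
print(compiled)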
index=False)\nwriter.save()\nwriter.close()\n\n\n","sub_path":"elena/analysis/dependent_qc/dependent_qc_score_per_input.py","file_name":"dependent_qc_score_per_input.py","file_ext":"py","file_size_in_byte":4670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"474596370","text":"from function_new import even_or_odd\n\n# def function_name(arguments, agruments2, arguments3):\n# pass\n\n\n\n\n\nn = int(input(\"Enter Number:\"))\n\neven_or_odd(n)\n\n\ndef fibonacci_series(n: int, fib_list: list) -> list:\n i = 0\n j = 1\n while n>0:\n fib_list.append(fib_list[i] + fib_list[j])\n i +=1\n j +=1\n n -=1\n return fib_list\n#\n# #\n# # fib_list = fibonacci_series(fib_list=[0,1], n=n)\n# #\n# # fib_list2 = fibonacci_series(20, [0,1])\n# # print(fib_list)\n# # print(fib_list2)\n# # fib_list_legth_20 = fibonacci_series(20,[0,1])\n# # print(fib_list_legth_20)\n#\n#\n#\n# def function_name(a:int, b: int, c: list, s=15):\n# print(f\"a={a}\")\n# print(f\"b={b}\")\n# print(f\"c={c}\")\n# print(f\"s={s}\")\n# # for item in args:\n# # print(item)\n# #\n# # for key,val in kwargs.items():\n# # print(f\"{key} --> {val}\")\n#\n#\n# sum = function_name(a=1,b=2, c=10)\n# print(sum)","sub_path":"bitbybit/programs/functions_impl.py","file_name":"functions_impl.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"475663740","text":"#!/usr/bin/python\n\nfrom os import system, path\t\t\t\nfrom time import sleep, strftime\nimport sys\n\nsystem('modprobe w1-gpio')\nsystem('modprobe w1-therm')\n\n# directory \ndir_temp = \"/sys/bus/w1/devices/w1_bus_master1/28-031661fa43ff/w1_slave\"\n\n#write temperature history \nt_file = open(path.join('/home/pi/Working','result.txt'), 'w')\n\n\n# read temperature\ndef read_file(file):\n f = open(file, 'r')\n lines = f.readlines()\n f.close()\n return lines\n\t\n#Write temperature to text file\ndef wtofile(value):\n\n\tif t_file:\n\t\tt_file.write(value)\n\telse:\n\t\tprint(\"need to create a new file\")\n\t#write to file in real-time\t\n\tsys.stdout.flush()\n\tt_file.flush()\n\ndef convert_temp(temp):\t\n\n\ttemp_raw = temp.split(\"=\")[1]\t\n\ttemp_final = round(int(temp_raw) / 1000, 2)\t\n\treturn temp_final\n\n\t\nif __name__ == '__main__':\n\n\twhile True:\n\t\t\n\t\tlines = read_file(dir_temp)\t\t\n\t\ttemp = convert_temp(lines[1])\n\t\t\n\t\t# write ambient temperature every hours\n\t\tif strftime(\"%M:%S\")== \"00:00\":\n\t\t\t\n\t\t\tval = strftime(\"%A, %H:%M:%S\")+ \" Temperature : \"+ str(temp)+\"\\n\"\t\t \n\t\t\twtofile(val)\n\t\t\n\t\t#print (strftime(\"%A, %H:%M:%S\"),\"temperature\",\"=\",temp) # affichage a l'ecran\t\t\n","sub_path":"temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"48674674","text":"from presidio_analyzer import Pattern, PatternRecognizer\n\n\nclass UKNINORecognizer(PatternRecognizer):\n \"\"\"\n Recognizes National insurance number using regex\n \"\"\"\n\n # pylint: disable=line-too-long,abstract-method\n # Weak pattern: National insurance number are a weak match, e.g., JG 12 13 16 A, AB123456C\n PATTERNS = [\n Pattern(\"NINO (very weak)\",\n r\"[A-Z]{2}?[ ]?[0-9]{2}[ ]?[0-9]{2}[ ]?[0-9]{2}[ ]?[ ]?[A-Z],?[ ]?[A-CEGHJ-PR-TW-Z][A-CEGHJ-NPR-TW-Z]{1}[0-9]{6}[A-DFM]?\",\n 0.5),\n ]\n CONTEXT = [\"National insurance number\", \"national insurance number\"]\n\n def 
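# convert_temp in the sensor script above parses the second line of a DS18B20
# w1_slave file, where the reading follows 't=' in millidegrees Celsius; the
# parsing in isolation, with a sample sensor line:
def convert_temp(line):
    raw = line.split("=")[1]               # text after 't=' is millidegrees C
    return round(int(raw) / 1000, 2)

print(convert_temp("4b 01 4b 46 7f ff 0c 10 2d t=20687"))   # 20.69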
__init__(\n self,\n patterns=None,\n context=None,\n supported_language=\"en\",\n supported_entity=\"UK_NINO\",\n ):\n context = context if context else self.CONTEXT\n patterns = patterns if patterns else self.PATTERNS\n super().__init__(\n supported_entity=supported_entity,\n patterns=patterns,\n context=context,\n supported_language=supported_language,\n )\n","sub_path":"presidio-analyzer/presidio_analyzer/predefined_recognizers/uk_nino_recognizer.py","file_name":"uk_nino_recognizer.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"517285982","text":"\"\"\"\n@Project :20180821\n@Time :2018/8/21 17:18\n@Author :Zhenxian\n@File :服务端.py\n@Software :PyCharm\n\"\"\"\nimport socket\nimport struct\nimport json\nimport subprocess\n\nHOST = '127.0.0.1'\nPORT = 8080\nADDRESS = (HOST, PORT)\nBUFF_SIZE = 1024\n\nss = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nss.bind(ADDRESS)\nss.listen(5)\n\nwhile 1:\n conn, add = ss.accept()\n while 1:\n cmd = conn.recv(BUFF_SIZE)\n if not cmd:\n break\n print(\"cmd:%s\", cmd)\n\n res = subprocess.Popen(cmd.decode('utf-8'),\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n error = res.stderr.read()\n print(error)\n if error:\n back_msg = error\n else:\n back_msg = res.stdout.read()\n\n header = {'data_size': len(back_msg)}\n header_json = json.dumps(header)\n header_json_bytes = bytes(header_json, encoding='utf-8')\n\n conn.send(struct.pack('i', len(header_json_bytes)))\n conn.send(header_json_bytes)\n conn.sendall(back_msg)\n conn.close()\n","sub_path":"20180821/struct解决黏包2/服务端.py","file_name":"服务端.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"217156342","text":"# $Id$\n'''\nSpeculative code for doubly-charmed baryon searches.\nThe following six final states are used:\n Xicc+ -> Lambdac+ K- pi+\n Xicc++ -> Lambdac+ K- pi+ pi+\n Xicc+ -> Xic0 pi+\n Xicc++ -> Xic0 pi+ pi+\n Xicc+ -> Xic+ pi+ pi-\n Xicc++ -> Xic+ pi+\nwhere the daughter charmed baryon is reconstructed via:\n Lambdac+ -> p K- pi+ (from StdLooseLambdac2PKPi)\n Xic0 -> Xi- pi+\n Xic+ -> Xi- pi+ pi+\nand where the Xi- is reconstructed via:\n Xi- -> Lambda pi-, Lambda -> p pi- (from StdLooseLambdaDD and StdLooseLambdaLL)\nIn addition to the six Xicc signal lines, three control lines are written out\nfor the Lambdac+, Xic0, and Xic+. 
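# The socket server in this record frames each reply as a 4-byte struct
# length, then a JSON header carrying data_size, then the payload; a sketch
# of the matching client-side unframing (assumes an already-connected socket
# `sock`; error handling kept minimal for brevity):
import json
import struct

def recv_exact(sock, n):
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('socket closed mid-frame')
        buf += chunk
    return buf

def recv_reply(sock):
    header_len = struct.unpack('i', recv_exact(sock, 4))[0]
    header = json.loads(recv_exact(sock, header_len).decode('utf-8'))
    return recv_exact(sock, header['data_size'])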
It will certainly be necessary to prescale\nthe Lambdac+ control line, and perhaps the others too.\n'''\n\n__author__ = ['Mat Charles']\n__date__ = '24/02/2011'\n__version__ = '$Revision: 1.1 $'\n\n__all__ = ('XiccBuilder', 'makeLc', 'filterKaons', 'filterPions', 'makeXi', 'makeXicc', 'makeXicZero', 'makeXicPlus')\n\n\nfrom Gaudi.Configuration import *\nfrom GaudiConfUtils.ConfigurableGenerators import FilterDesktop, CombineParticles\nfrom PhysSelPython.Wrappers import Selection, DataOnDemand\nfrom StrippingConf.StrippingLine import StrippingLine\nfrom StrippingUtils.Utils import LineBuilder\n\ndefault_name = \"Xicc\"\n\nclass XiccBuilder(LineBuilder) :\n \"\"\"\n Search for Xicc\n \"\"\"\n\n __configuration_keys__ = ('LongTrackGEC'\n ,'controlPrescaleLc'\n ,'controlPrescaleXic'\n ,'signalPrescaleViaLc'\n ,'signalPrescaleViaXic')\n\n def __init__(self, name, config) :\n\n LineBuilder.__init__(self, name, config)\n\n # Set up global event cuts.\n # Conceptually these come first, although the place where they're\n # inserted into the line is at the bottom of the code.\n _globalEventCuts = \"(recSummary (LHCb.RecSummary.nLongTracks, 'Rec/Track/Long') < %(LongTrackGEC)s )\" % config\n\n # Pick up standard kaons, pions\n # Filter them for use as daughter particles:\n self.dauPi = filterPions(name+'FilteredPions')\n self.dauK = filterKaons(name+'FilteredKaons')\n\n # Pick up standard Lambdac -> p K- pi+ then filter it to reduce rate:\n self.filterLc = makeLc(name+'FilterLc')\n self.filterLcForControl = filterLcForControl(name+'FilterLcForControl', self.filterLc)\n\n # Some generic cuts for Xicc. Vertex chi2 cut depends on number of daughters (2 dau => 1 NDF; 3 dau => 3 NDF; 4 dau => 5 NDF)\n _strCutComb = '(APT>2000.0*MeV)'\n _strCutMoth2 = '( (BPVDIRA > 0.999) & (VFASPF(VCHI2)<20.0) & (BPVVDCHI2 > 16) & (M < 4500*MeV) & (CHILD(VFASPF(VZ),1) - VFASPF(VZ) > 0.01*mm) )'\n _strCutMoth3 = '( (BPVDIRA > 0.999) & (VFASPF(VCHI2)<30.0) & (BPVVDCHI2 > 16) & (M < 4500*MeV) & (CHILD(VFASPF(VZ),1) - VFASPF(VZ) > 0.01*mm) )'\n _strCutMoth4 = '( (BPVDIRA > 0.999) & (VFASPF(VCHI2)<60.0) & (BPVVDCHI2 > 16) & (M < 4500*MeV) & (CHILD(VFASPF(VZ),1) - VFASPF(VZ) > 0.01*mm) )'\n\n # Combine Lambda with pion to make Xi-\n self.stdLambdaLL = DataOnDemand(Location = 'Phys/StdLooseLambdaLL/Particles')\n self.stdLambdaDD = DataOnDemand(Location = 'Phys/StdLooseLambdaDD/Particles')\n self.combineXiLL = makeXi(name+'CombineXiLL', self.stdLambdaLL, 50, 35)\n self.combineXiDD = makeXi(name+'CombineXiDD', self.stdLambdaDD, 80, 50)\n\n # Combine Xi- with pion(s) to make Xic0, Xic+\n self.combineXicZero = makeXicZero(name+\"CombineXicZero\", [ self.combineXiLL, self.combineXiDD, self.dauPi ])\n self.combineXicPlus = makeXicPlus(name+\"CombineXicPlus\", [ self.combineXiLL, self.combineXiDD, self.dauPi ])\n\n # Combine Lc+ with a K and a pi to make a Xicc+ or Xicc++:\n self.combineXicc1 = makeXicc(name+'CombineXicc1', [ self.filterLc, self.dauPi, self.dauK ], '[Xi_cc+ -> Lambda_c+ K- pi+]cc', _strCutComb, _strCutMoth3)\n self.combineXicc2 = makeXicc(name+'CombineXicc2', [ self.filterLc, self.dauPi, self.dauK ], '[Xi_cc++ -> Lambda_c+ K- pi+ pi+]cc', _strCutComb, _strCutMoth4)\n # Combine Xic0/+ with pion(s) to make Xicc+, Xicc++\n self.combineXicc3 = makeXicc(name+'CombineXicc3', [ self.combineXicZero, self.dauPi ], '[Xi_cc+ -> Xi_c0 pi+]cc', _strCutComb, _strCutMoth2)\n self.combineXicc4 = makeXicc(name+'CombineXicc4', [ self.combineXicZero, self.dauPi ], '[Xi_cc++ -> Xi_c0 pi+ pi+]cc', _strCutComb, _strCutMoth3)\n 
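# The vertex-chi2 NDF comment above follows standard 3D vertex-fit counting:
# each daughter track contributes two degrees of freedom and the fitted
# vertex position removes three, so NDF = 2*n_daughters - 3:
for n_daughters in (2, 3, 4):
    print(n_daughters, 'daughters ->', 2 * n_daughters - 3, 'NDF')   # 1, 3, 5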
self.combineXicc5 = makeXicc(name+'CombineXicc5', [ self.combineXicPlus, self.dauPi ], '[Xi_cc+ -> Xi_c+ pi+ pi-]cc', _strCutComb, _strCutMoth3)\n self.combineXicc6 = makeXicc(name+'CombineXicc6', [ self.combineXicPlus, self.dauPi ], '[Xi_cc++ -> Xi_c+ pi+]cc', _strCutComb, _strCutMoth2)\n\n\n # Control lines (to be prescaled!)\n self.lineControl1 = StrippingLine(name+'ControlLc',\n prescale = config['controlPrescaleLc'],\n postscale = 1.0,\n FILTER = _globalEventCuts,\n selection = self.filterLcForControl)\n self.lineControl2 = StrippingLine(name+'ControlXicZero',\n prescale = config['controlPrescaleXic'],\n postscale = 1.0,\n FILTER = _globalEventCuts,\n selection = self.combineXicZero)\n self.lineControl3 = StrippingLine(name+'ControlXicPlus',\n prescale = config['controlPrescaleXic'],\n postscale = 1.0,\n FILTER = _globalEventCuts,\n selection = self.combineXicPlus)\n\n # Physics lines\n self.lineXicc1 = StrippingLine(name+'XiccPlusToLcKPi',\n prescale = config['signalPrescaleViaLc'],\n postscale = 1.0,\n FILTER = _globalEventCuts,\n selection = self.combineXicc1)\n self.lineXicc2 = StrippingLine(name+'XiccPlusPlusToLcKPiPi',\n prescale = config['signalPrescaleViaLc'],\n postscale = 1.0,\n FILTER = _globalEventCuts,\n selection = self.combineXicc2)\n self.lineXicc3 = StrippingLine(name+'XiccPlusToXicZeroPi',\n prescale = config['signalPrescaleViaXic'],\n postscale = 1.0,\n FILTER = _globalEventCuts,\n selection = self.combineXicc3)\n self.lineXicc4 = StrippingLine(name+'XiccPlusPlusToXicZeroPiPi',\n prescale = config['signalPrescaleViaXic'],\n postscale = 1.0,\n FILTER = _globalEventCuts,\n selection = self.combineXicc4)\n self.lineXicc5 = StrippingLine(name+'XiccPlusToXicPlusPiPi',\n prescale = config['signalPrescaleViaXic'],\n postscale = 1.0,\n FILTER = _globalEventCuts,\n selection = self.combineXicc5)\n self.lineXicc6 = StrippingLine(name+'XiccPlusPlusToXicPlusPi',\n prescale = config['signalPrescaleViaXic'],\n postscale = 1.0,\n FILTER = _globalEventCuts,\n selection = self.combineXicc6)\n\n self.registerLine(self.lineControl1)\n self.registerLine(self.lineControl2)\n self.registerLine(self.lineControl3)\n self.registerLine(self.lineXicc1)\n self.registerLine(self.lineXicc2)\n self.registerLine(self.lineXicc3)\n self.registerLine(self.lineXicc4)\n self.registerLine(self.lineXicc5)\n self.registerLine(self.lineXicc6)\n\n\ndef makeLc(localName) :\n # Pick up standard Lambdac -> p K- pi+\n _stdLc = DataOnDemand(Location = 'Phys/StdLooseLambdac2PKPi/Particles')\n\n # Filter to reduce rate:\n _strCutK = '( CHILD(PIDK,1) - CHILD(PIDpi,1) > 5.0 )'\n _strCutp = '( CHILD(PIDp,2) - CHILD(PIDpi,2) > 5.0 )'\n _strCutpi = '( CHILD(PIDpi,3) - CHILD(PIDK,3) > 0.0 )'\n _strCutTrackChi2 = '( (CHILD(TRCHI2DOF,1)<4.0) & (CHILD(TRCHI2DOF,2)<4.0) & (CHILD(TRCHI2DOF,3)<4.0) )'\n _strCutDIRA = '( BPVDIRA > 0.95 )'\n _strCutFD = '( BPVVDCHI2 > 25 )'\n _strCutMass = '( (M > 2185.0*MeV) & (M < 2385*MeV) )'\n _strCutIP = '( NINGENERATION( (MIPCHI2DV(PRIMARY) > 30.0), 1) >= 1 )'\n _strCutLc = '(' + _strCutK + '&' + _strCutpi + '&' + _strCutp + '&' + _strCutTrackChi2 + '&' + _strCutDIRA + '&' + _strCutFD + '&' + _strCutMass + '&' + _strCutIP + ')'\n _filterLc = FilterDesktop(Code = _strCutLc)\n return Selection ( localName,\n Algorithm = _filterLc,\n RequiredSelections = [ _stdLc ] )\n\ndef filterLcForControl(localName, inputSel) :\n # Apply additional cuts for prompt Lc:\n _strCutDIRA = '( BPVDIRA > 0.999 )'\n _filterLc = FilterDesktop(Code = _strCutDIRA)\n return Selection ( localName,\n Algorithm = 
_filterLc,\n RequiredSelections = [ inputSel ] )\n\ndef filterKaons(localName) :\n # Pick up standard input list\n _stdK = DataOnDemand(Location = 'Phys/StdLooseKaons/Particles')\n # Filter:\n _strCutDauK = '( (P>2.0*GeV) & (PIDK-PIDpi>5.0) & (TRCHI2DOF<4.0) & (PT>250.0*MeV) & (MIPCHI2DV(PRIMARY)>4.0) )'\n _filterK = FilterDesktop(Code = _strCutDauK)\n return Selection ( localName,\n Algorithm = _filterK,\n RequiredSelections = [ _stdK ] )\n\ndef filterPions(localName) :\n # Pick up standard input list\n _stdPi = DataOnDemand(Location = 'Phys/StdLoosePions/Particles')\n # Filter:\n _strCutDauPi = '( (P>2.0*GeV) & (PIDpi-PIDK>0.0) & (TRCHI2DOF<4.0) & (PT>250.0*MeV) & (MIPCHI2DV(PRIMARY)>4.0) )'\n _filterPi = FilterDesktop(Code = _strCutDauPi)\n return Selection ( localName,\n Algorithm = _filterPi,\n RequiredSelections = [ _stdPi ] )\n\ndef makeXi(localName, inputList, cutWide, cutTight) :\n _stdPi = DataOnDemand(Location = 'Phys/StdLoosePions/Particles')\n _strCutPiForXi = '( (P>2.0*GeV) & (TRCHI2DOF<4.0) & (PT>250.0*MeV) & (MIPCHI2DV(PRIMARY)>25.0) )'\n _strCutCombXi = \"( ADAMASS('Xi-') < %(cutWide)s * MeV )\" % locals()\n _strCutMothXi = \"( ( ADMASS('Xi-') < %(cutTight)s * MeV ) & (VFASPF(VCHI2)<20) )\" % locals()\n _combineXi = CombineParticles( DecayDescriptor = '[Xi- -> Lambda0 pi-]cc',\n DaughtersCuts = { \"pi-\": _strCutPiForXi },\n CombinationCut = _strCutCombXi,\n MotherCut = _strCutMothXi )\n return Selection ( localName,\n Algorithm = _combineXi,\n RequiredSelections = [ inputList, _stdPi ] )\n\n\ndef makeXicc(localName, inputSelections, decay, cutComb, cutMoth) :\n _combineXicc = CombineParticles( DecayDescriptor = decay,\n CombinationCut = cutComb,\n \t \t \t \t MotherCut = cutMoth )\n return Selection ( localName,\n Algorithm = _combineXicc,\n RequiredSelections = inputSelections)\n\ndef makeXicZero(localName, inputSelections) :\n _strCutCombXicZero = \"( ADAMASS('Xi_c0') < 170*MeV )\"\n _strCutMothXicZero = \"( ( ADMASS('Xi_c0') < 120*MeV ) & ( BPVDIRA > 0.9 ) & ( BPVVDCHI2 > 25 ) & (VFASPF(VCHI2)<30) )\"\n _combineXicZero = CombineParticles( DecayDescriptor = '[Xi_c0 -> Xi- pi+]cc',\n CombinationCut = _strCutCombXicZero,\n MotherCut = _strCutMothXicZero )\n return Selection( localName,\n Algorithm = _combineXicZero,\n RequiredSelections = inputSelections )\n\ndef makeXicPlus(localName, inputSelections) :\n _strCutCombXicPlus = \"( ( ADAMASS('Xi_c+') < 170*MeV ) & ( AHASCHILD( (ABSID == 'pi+') & (MIPCHI2DV(PRIMARY) > 10.0) ) ) )\"\n _strCutMothXicPlus = \"( ( ADMASS('Xi_c+') < 120*MeV ) & ( BPVDIRA > 0.9 ) & ( BPVVDCHI2 > 25 ) & (VFASPF(VCHI2)<60) )\"\n _combineXicPlus = CombineParticles( DecayDescriptor = '[Xi_c+ -> Xi- pi+ pi+]cc',\n CombinationCut = _strCutCombXicPlus,\n MotherCut = _strCutMothXicPlus )\n return Selection(localName,\n Algorithm = _combineXicPlus,\n RequiredSelections = inputSelections )\n","sub_path":"DaVinciDev_v38r1p1/Phys/StrippingArchive/python/StrippingArchive/Stripping17/StrippingXicc.py","file_name":"StrippingXicc.py","file_ext":"py","file_size_in_byte":13043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"629222898","text":"import random\n\ndef ChooseMove():\n global z\n rockpopularity = int((open(\"/Users/timothy/Documents/rock.txt\",\"r\")).read())\n scissorspopularity = int((open(\"/Users/timothy/Documents/scissors.txt\",\"r\")).read())\n paperpopularity = int((open(\"/Users/timothy/Documents/paper.txt\",\"r\")).read())\n\n if rockpopularity > scissorspopularity:\n if rockpopularity > 
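# ChooseMove in this record plays the counter to the player's historically
# most popular move, breaking ties at random; the same idea in compact form
# (pure Python, same 0=rock / 1=paper / 2=scissors indexing as the move list):
import random

def choose_move(popularity):               # e.g. {'rock': 12, 'paper': 7, 'scissors': 7}
    counters = {'rock': 1, 'paper': 2, 'scissors': 0}   # index of the beating move
    top = max(popularity.values())
    candidates = [m for m, n in popularity.items() if n == top]
    return counters[random.choice(candidates)]

print(choose_move({'rock': 12, 'paper': 7, 'scissors': 7}))   # 1: paper beats rock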
paperpopularity:\n return 1\n elif rockpopularity == paperpopularity:\n return random.choice([1,2])\n elif paperpopularity > scissorspopularity:\n return 2\n elif scissorspopularity > rockpopularity:\n if scissorspopularity > paperpopularity:\n return 0 \n elif paperpopularity > scissorspopularity:\n return 2\n else:\n return random.choice([0,1])\n else:\n if paperpopularity == rockpopularity:\n return random.randint(0,1)\n else:\n return random.choice([0,1])\n\nwhile True:\n move = [\"rock\", \"paper\", \"scissors\"]\n movenumber = 0\n computermove = ChooseMove()\n go = 1\n while go == 1:\n playermove = move.index(input(\"What move do you want to choose: \"))\n popularity = str(int(open(\"/Users/timothy/Documents/\" + move[playermove] + \".txt\",\"r\").read()) + 1)\n (open(\"/Users/timothy/Documents/\" + move[playermove] + \".txt\",\"w\")).write(popularity)\n go = 2\n print ()\n print (\"The computer picked \" + move[computermove])\n print ()\n if playermove - computermove == 1:\n print (\"Player won\")\n elif playermove - computermove == 2:\n print (\"Player lost\")\n elif playermove - computermove == -1:\n print (\"Player lost\")\n elif playermove - computermove == -2:\n print (\"Player won\")\n else:\n print (\"It's a draw\")","sub_path":"rock paper scissors.py","file_name":"rock paper scissors.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"158039035","text":"import librosa\r\nimport numpy as np\r\nfrom getlist import get_list\r\nfrom python_speech_features import mfcc,delta\r\n\r\ndef get_mfcc(filename):\r\n x, fs = librosa.load(filename, sr=16000, mono=True)\r\n x, idx = librosa.effects.trim(x, 20)\r\n feat0 = mfcc(x, fs)\r\n feat1 = delta(feat0, 1)\r\n feat2 = delta(feat0, 2)\r\n return np.hstack((feat0, feat1, feat2))\r\n\r\nif __name__ == \"__main__\":\r\n\r\n wavlist, train, test = get_list('new_data')\r\n feats = get_mfcc(wavlist[-1])\r\n print(feats.shape)\r\n","sub_path":"1 TDNN/feats.py","file_name":"feats.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"530572081","text":"import math\r\n\r\nprint(\"Reactor Size Calculator\")\r\n\r\nprint()\r\nprint(\"Specifications:\")\r\n# Conditions of reaction and flow rate of reactants into reactor\r\nflowrate = float(input(\"Please enter the total flow rate of the reactants entering the reactor: (kmol/hr) \"))\r\npressureReaction = float(input(\"At what presssure does the reactor need to be at during the reaction: (bar) \"))\r\ntempReaction = float(input(\"At what temperature will the reaction be taking place at: (Degrees Kelvin) \"))\r\nuniGasConstant = 8.314\r\nprint()\r\nprint(\"Catalyst\")\r\nthickness = float(input(\"Please enter the thickness of the thickness of the catalyst bed: (cm) \"))\r\nresidenceTime = float(input(\"How long is the residence time of the reactants in the catalyst? 
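# get_mfcc above stacks 13 base MFCCs with first- and second-order deltas
# into the classic 39-dimensional frame (python_speech_features mfcc defaults
# to numcep=13); the shape arithmetic with dummy arrays, no audio required:
import numpy as np

frames = 120                                   # hypothetical frame count
feat0 = np.zeros((frames, 13))                 # base MFCCs
feat1 = np.zeros((frames, 13))                 # delta features
feat2 = np.zeros((frames, 13))                 # delta-delta features
print(np.hstack((feat0, feat1, feat2)).shape)  # (120, 39)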
(seconds) \"))\r\nvoidage = float(input(\"Please enter the typical voidage of the catalyst bed in the reactor as a decimal: \"))\r\n\r\n# Calculation of volumetric flow rate of reactants into reactor\r\nprint()\r\nvolumeFlowRateHour = ((flowrate*1000)*(uniGasConstant)*tempReaction)/(pressureReaction*100000)\r\nvolumeFlowRateSecond = volumeFlowRateHour/3600\r\n\r\n# Calculating diameter of reactor\r\nradius = math.sqrt((volumeFlowRateSecond*residenceTime)/(math.pi*(thickness/100)))\r\nareaCatalystBed = ((math.pi * math.pow(radius,2))/(voidage))\r\ndiameterReactor = math.sqrt((4*areaCatalystBed)/math.pi)\r\n\r\nprint(\"Diameter Of Reactor = {:.2f} meters\".format(diameterReactor))\r\n","sub_path":"ChemReactorCalculator.py","file_name":"ChemReactorCalculator.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"598531232","text":"#Создай собственный Шутер!\r\nfrom pygame import *\r\nfrom random import randint\r\nfrom time import time as timer\r\nmixer.init()\r\nlast_time = timer()\r\nnum_fire = 0\r\nrec_time = False\r\nlife = 3\r\nwin_w = 800\r\nwin_h = 600\r\nclass GameSprite(sprite.Sprite):\r\n def __init__(self,p_im,p_x,p_y,p_s,p_h,p_w):\r\n super().__init__()\r\n self.image =transform.scale(image.load(p_im),(p_h,p_w))\r\n self.speed = p_s\r\n self.rect = self.image.get_rect()\r\n self.rect.x = p_x\r\n self.rect.y = p_y\r\n def reset(self):\r\n window.blit(self.image,(self.rect.x,self.rect.y))\r\nclass Player(GameSprite):\r\n def update(self):\r\n keys = key.get_pressed()\r\n if keys[K_LEFT] and self.rect.x > 20:\r\n self.rect.x -= self.speed\r\n if keys[K_RIGHT] and self.rect.x < win_w - 80:\r\n self.rect.x += self.speed\r\n def fire(self):\r\n kick = mixer.Sound(\"fire.ogg\")\r\n kick.play()\r\n bullet = Bullet(\"knife.png\",self.rect.centerx-10,self.rect.top,-10,50,50)\r\n Bullets.add(bullet)\r\nclass Bullet(GameSprite):\r\n def update(self):\r\n self.rect.y+=self.speed\r\n if self.rect.y<0:\r\n self.kill\r\n\r\nclass Enemy(GameSprite):\r\n def update(self):\r\n self.rect.y+=self.speed\r\n global lost\r\n if self.rect.y>500:\r\n #self.kill()\r\n self.rect.y = 0\r\n self.rect.x = randint(10,620)\r\n lost = lost +1\r\n\r\nmixer.init()\r\nmixer.music.load(\"space.ogg\")\r\nmixer.music.play()\r\nwindow = display.set_mode((win_w,win_h))\r\ndisplay.set_caption(\"Шутер\")\r\nbackground = transform.scale(image.load(\"Столовая_(The_Skeld)2.png\"),(800,600))\r\nhero = Player(\"among_cr1.png\",5 ,530 ,4,60,60)\r\nmonsters = sprite.Group()\r\nBullets = sprite.Group()\r\nfor i in range (3):\r\n monster = Enemy(\"among_imp.png\",randint(10,620),0,randint(1,3),50,50)\r\n monsters.add(monster)\r\n monster2 = Enemy(\"among_cr2.png\",randint(10,620),0,randint(1,3),70,70)\r\n monsters.add(monster2)\r\n monster3 = Enemy(\"among_cr3.png\",randint(10,620),0,randint(1,3),50,50)\r\n monsters.add(monster3)\r\nfont.init()\r\nfont1 = font.SysFont(\"Arial\",36) \r\nclock = time.Clock()\r\nfps = 60\r\nfinish = False\r\ngame = True\r\nlost = 0\r\nscore = 0\r\nstart = True\r\nwin_im = transform.scale(image.load(\"win_im.jpg\"),(800,600))\r\nlose_im = transform.scale(image.load(\"lose_im.jpeg\"),(800,600))\r\nlast_time2 = timer()\r\nwhile game:\r\n \r\n for e in event.get():\r\n if e.type == QUIT:\r\n game = False\r\n if e.type == KEYDOWN:\r\n if e.key == K_w:\r\n if num_fire<10 and rec_time == False:\r\n num_fire +=1\r\n hero.fire()\r\n elif num_fire>=10 and rec_time == False:\r\n last_time = timer()\r\n rec_time = 
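# The sizing chain above as one pure function: ideal-gas volumetric flow
# Q = nRT/P, bed radius from the residence volume Q*t = pi*r^2*h, bed area
# scaled up by voidage, and the diameter of the equivalent circle. Same units
# as the script (kmol/hr, bar, K, cm, s); the example inputs are hypothetical:
import math

def reactor_diameter(flow_kmol_hr, p_bar, temp_k, thickness_cm, res_time_s, voidage):
    R = 8.314
    q_m3_s = (flow_kmol_hr * 1000 * R * temp_k) / (p_bar * 1e5) / 3600
    radius = math.sqrt(q_m3_s * res_time_s / (math.pi * thickness_cm / 100))
    area = math.pi * radius ** 2 / voidage
    return math.sqrt(4 * area / math.pi)

print(round(reactor_diameter(100, 10, 500, 5, 2, 0.4), 2))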
True\r\n if start == True:\r\n now_time2 = timer()\r\n window.blit(background,(0,0))\r\n text_int = font1.render(\"У тебя 3 жизни, выжывай,и да это нападание crewmatов\",5,(255,255,255))\r\n window.blit(text_int,(100,300))\r\n display.update()\r\n if now_time2 - last_time2 >= 2:\r\n #time.delay(9000)\r\n start = False\r\n\r\n if finish != True and start !=True:\r\n window.blit(background,(0,0))\r\n \r\n hero.update()\r\n hero.reset()\r\n text_lose = font1.render(\"Членов экипажов не убито\"+str(lost),1,(255,255,255))\r\n text_score = font1.render(\"Членов экипажов убито\"+str(score),1,(255,255,255))\r\n window.blit(text_lose,(10,30))\r\n window.blit(text_score,(30,50))\r\n monsters.draw(window)\r\n monsters.update()\r\n Bullets.draw(window)\r\n Bullets.update()\r\n if rec_time ==True:\r\n now_time = timer()\r\n if now_time - last_time>1.5:\r\n num_fire =0\r\n rec_time = False\r\n else:\r\n print('work!')\r\n text_reload = font1.render(\"Перезарядка\",1,(255,0,0))\r\n window.blit(text_reload,(50,70))\r\n\r\n if sprite.spritecollide(hero,monsters,True):\r\n life -=1\r\n monster = Enemy(\"among_imp.png\",randint(10,620),0,randint(1,2),50,50)\r\n monsters.add(monster)\r\n monster2 = Enemy(\"among_cr2.png\",randint(10,620),0,randint(1,2),70,70)\r\n monsters.add(monster2)\r\n monster3 = Enemy(\"among_cr3.png\",randint(10,620),0,randint(1,2),50,50)\r\n monsters.add(monster3)\r\n\r\n if life == 0 or lost>70:\r\n finish = True\r\n window.blit(lose_im,(0,0))\r\n\r\n if score>=150:\r\n finish = True\r\n window.blit(win_im,(0,0))\r\n collides = sprite.groupcollide(monsters,Bullets,True,True)\r\n for coll in collides:\r\n score+=1\r\n if score%2 == 0:\r\n monster = Enemy(\"among_imp.png\",randint(10,620),0,randint(1,3),50,50)\r\n monsters.add(monster)\r\n monster2 = Enemy(\"among_cr2.png\",randint(10,620),0,randint(1,3),70,70)\r\n monsters.add(monster2)\r\n monster3 = Enemy(\"among_cr3.png\",randint(10,620),0,randint(1,3),50,50)\r\n monsters.add(monster3)\r\n display.update()\r\n clock.tick(fps)","sub_path":"104580/shuter.py","file_name":"shuter.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"181872288","text":"import logging\nimport time\nimport pytest\n\nfrom bentoml.yatai.client import get_yatai_client\nfrom bentoml.yatai.proto.repository_pb2 import BentoUri\nfrom tests.bento_service_examples.example_bento_service import ExampleBentoService\nfrom tests.yatai.local_yatai_service import (\n local_yatai_service_container,\n local_yatai_service_from_cli,\n)\n\nlogger = logging.getLogger('bentoml.test')\n\n\ndef test_sqlite_and_local_fs():\n with local_yatai_service_container() as yatai_server_url:\n yc = get_yatai_client(yatai_server_url)\n svc = ExampleBentoService()\n svc.pack('model', [1, 2, 3])\n bento_tag = f'{svc.name}:{svc.version}'\n logger.info(f'Saving BentoML saved bundle {bento_tag}')\n svc.save(yatai_url=yatai_server_url)\n\n bento_pb = yc.repository.get(bento_tag)\n assert (\n bento_pb.uri.type == BentoUri.LOCAL\n ), 'BentoService storage type mismatched, expect LOCAL'\n\n logger.info(f'Deleting saved bundle {bento_tag}')\n delete_svc_result = yc.repository.delete(bento_tag)\n assert delete_svc_result is None\n\n\n@pytest.mark.skip('Skipping Postgres test on Github Action as it continues been flaky')\ndef test_yatai_server_with_postgres_and_local_storage():\n postgres_db_url = 'postgresql://postgres:postgres@localhost/bentoml:5432'\n\n from sqlalchemy_utils import create_database\n\n 
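# The reload logic above caps fire() at 10 shots and then enforces a 1.5 s
# cooldown via timestamps; the same pattern isolated from pygame:
from time import monotonic

class Cooldown:
    def __init__(self, max_shots=10, reload_s=1.5):
        self.max_shots, self.reload_s = max_shots, reload_s
        self.shots, self.reload_started = 0, None

    def try_fire(self):
        if self.reload_started is not None:
            if monotonic() - self.reload_started < self.reload_s:
                return False               # still reloading
            self.shots, self.reload_started = 0, None
        if self.shots < self.max_shots:
            self.shots += 1
            return True
        self.reload_started = monotonic()  # magazine empty: start the reload
        return False

gun = Cooldown()
print(sum(gun.try_fire() for _ in range(12)))   # 10 shots fire, then blocked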
create_database(postgres_db_url)\n time.sleep(60)\n\n with local_yatai_service_from_cli(db_url=postgres_db_url) as yatai_server_url:\n logger.info('Saving bento service')\n logger.info(f'yatai url is {yatai_server_url}')\n svc = ExampleBentoService()\n svc.pack('model', [1, 2, 3])\n bento_tag = f'{svc.name}:{svc.version}'\n logger.info(f'Saving BentoML saved bundle {bento_tag}')\n svc.save(yatai_url=yatai_server_url)\n\n yc = get_yatai_client(yatai_server_url)\n bento_pb = yc.repository.get(bento_tag)\n assert (\n bento_pb.uri.type == BentoUri.LOCAL\n ), 'BentoService storage type mismatched, expect LOCAL'\n","sub_path":"tests/integration/yatai_server/test_local_fs.py","file_name":"test_local_fs.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"654127374","text":"# This file will illustrate different methods available in the webdriver class\n\nfrom selenium import webdriver\nimport unittest\n\nclass WebDriverMethodsTest(unittest.TestCase):\n url = None\n browser = \"firefox\"\n\n @classmethod\n def setUpClass(cls):\n cls.driver = webdriver.Firefox()\n cls.driver.delete_all_cookies()\n cls.driver.maximize_window()\n\n WebDriverMethodsTest.url = \"https://www.staging.lendingtree.com/\"\n cls.driver.get(WebDriverMethodsTest.url)\n cls.driver.implicitly_wait(10)\n\n def test_checkURL(self):\n # Get the url from the driver and check it is same as that is opened\n print(\"url retrieved from driver: \"+ self.driver.current_url)\n assert(WebDriverMethodsTest.url==self.driver.current_url)\n\n def test_browserName(self):\n # Get the underlying browser name for webdriver\n print(\"Underlying browser: \"+self.driver.name)\n assert(self.driver.name==WebDriverMethodsTest.browser)\n\n\n def test_getPageSource(self):\n #Get the page source from the url opened\n pagesource = self.driver.page_source\n #print(\"Page Source: \\n\"+pagesource)\n assert True\n\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.delete_all_cookies()\n cls.driver.close()\n cls.driver.quit()","sub_path":"Tests/WebDriverMethods.py","file_name":"WebDriverMethods.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"410444100","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#%% Importing relevant libraries\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport ast\nimport matplotlib.path as mplpath\n\n#%% Load the data. The dataset is quite big, containing over 5 million\n# observations about the e-scooters deployed in the city of Paris. It can\n# be downloaded here: \n# https://drive.google.com/file/d/1xM6f6ITr6USMoSBy9Y-9WPs5ZZ0td0HA/view?usp=sharing\nraw = pd.read_csv('~/Data/escooters.csv')\n\n# Dropping useless columns\ndf = raw.drop(['scheduler_id', 'points', 'task_time', 'task_lat', \n 'task_lng'], axis=1)\n\n#%% Count the number of missing values in the data\nna_count = []\nfor col in df.columns.values:\n na_count.append(np.sum(df[df[col].isna()]))\n# one can observe 318 nan's in the 'battery_level' column, probably caused by\n# some problem in the IoT data transfer. \n \n# Filling the nan's with battery_level mean value. 
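As a side note on the missing-value count below: the usual pandas idiom is DataFrame.isna().sum(), which returns the per-column count directly. A minimal self-contained sketch (the toy frame is purely illustrative):
import pandas as pd
import numpy as np

toy = pd.DataFrame({'battery_level': [87.0, np.nan, 54.0, np.nan],
                    'vendor': ['Lime', 'Bird', None, 'Lime']})
print(toy.isna().sum())  # per-column count of missing values
# fill gaps with the column mean, assigning back instead of chained inplace
toy['battery_level'] = toy['battery_level'].fillna(toy['battery_level'].mean())
print(toy)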
\ndf['battery_level'] = df['battery_level'].fillna(df.battery_level.mean())\n\n# Given there is only a very small number of missing values (318 over \n# almost 5 million rows) it is safe to fill them with the mean value, \n# without losing any information.\n\n#%% Check for misspellings in the categorical data\nlevels = []\nfor col in ['vendor','city','country']:\n    levels.append(set(df[col]))\n# It was identified that the vendor 'Mobike' was written as 'Obike' in a \n# couple of cases, probably due to data transfer errors during the API call.\n\n# Correcting the misspelled values\ndf.loc[df.vendor=='Obike', 'vendor'] = 'Mobike'\n\n# It is also observed that columns 'city' and 'country' have just one value\n# each ('Paris' and 'France', respectively), hence they give us no \n# interpretation power and will be dropped.\ndf.drop(['city','country'], axis=1, inplace=True)\n\n#%% Cast the desired data type for each column\ndf = df.astype({'scooter_id':'str', 'lat':'float', 'lng':'float',\n                'battery_level':'float', 'vendor':'str', 'time':'datetime64[ns]'})\n# The 'time' column is cast to 'datetime' format, for future work. This\n# transformation however takes some time to be processed, given the number\n# of observations.\n \n#%% Check numerical data boundaries, for inconsistent data, or values out \n# of expected range\nboundaries = []\nfor col in ['lat','lng','battery_level','time']:\n    boundaries.append((df[col].min(), df[col].max()))\n# It is observed that 'lat' and 'lng' are within the Paris metropolitan\n# region (checking Google Maps) and time is limited to the month of March/2019.\n# However, after some further exploration, one sees that 'battery_level'\n# shows 12 rows with values above 100% (101% in this case) from the vendor Voi. \n# This is probably due to errors in their IoT battery sensors.\n\n# Correcting these out-of-bound values\ndf.loc[df.battery_level==df.battery_level.max(), 'battery_level'] = 100.0\n\n#%% Check if there are duplicated rows\ndouble = np.sum(df.duplicated())\n# It is observed there are 281 duplicated rows, probably due to problems \n# during data transmission from the e-scooters\n\n# Dropping the duplicated rows\ndf.drop_duplicates(inplace=True)\n\n#%% Import geographic data for HotZone analysis\n# The dataset was obtained in https://opendata.paris.fr/explore/dataset/arrondissements/information/\n# This is an official site with open information about the city of Paris\narons = pd.read_csv('~/Data/arrondissements.csv', sep=';')\narons.sort_values('C_AR', inplace=True)\narons.reset_index(inplace=True, drop=True)\narons.drop(['N_SQ_AR','C_AR','C_ARINSEE','L_AR','N_SQ_CO','SURFACE',\n            'PERIMETRE','OBJECTID','LONGUEUR'], axis=1, inplace=True)\narons.columns = ['name','center','geometry']\n\ncenters = [ast.literal_eval(el) for el in arons.center] \ncenters = [(el[1], el[0]) for el in centers]\narons.center = centers\n\ngeom = [ast.literal_eval(el) for el in arons.geometry]\ngeom = [np.array(el['coordinates'][0]) for el in geom]\narons.geometry = geom\n\npaths = [mplpath.Path(el) for el in arons.geometry]\narons['polygon'] = paths\n\n# The arrondissement dataset was cleaned to present only relevant information\n# and the region of each arrondissement was calculated based on the \n# geometry and coordinates provided.\n\n#%% Append the arrondissement each scooter is in\npoints = df[['lng','lat']]\npoints = points.values\npoints = np.array(points, dtype=float)\nmark = []\nfor i in range(len(paths)):\n    mark.append(paths[i].contains_points(points))\n\ndf['aron'] = 0\nfor j in range(len(mark)):\n    df.loc[mark[j], 'aron'] = j+1\n\ndf = df[df.aron!=0]\n\n# Each scooter was placed in an arrondissement for the HotZone analysis. It\n# was observed that 2% of the data was located outside Paris. Given this\n# is a small number of observations and the great difficulty\n# in finding geographic coordinates for the metropolitan region, it was \n# decided to drop those observations, since such a small number wouldn't\n# affect the final objective of the study whilst saving a lot of time in\n# the analysis.\n\n# One can notice that, after this row deletion, vendor Mobike went missing.\n# Given that they had such a small participation in the data, they could\n# be considered an outlier and their removal wouldn't impact the analysis.\n\n#%% Save the clean datasets in our localdir for future analysis\nwith open('~/pickles/escooters_clean.pkl', 'wb') as f:\n    pickle.dump(df, f)\nwith open('~/pickles/arons_poly.pkl','wb') as f:\n    pickle.dump(arons, f)","sub_path":"escooters/escooters_data_exploration.py","file_name":"escooters_data_exploration.py","file_ext":"py","file_size_in_byte":5374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"577517633","text":"# coding=utf-8\nfrom .base import Layer\nimport torch\nimport torch.nn as nn\n\n\n__all__ = ['Coord']\n\n\nclass ImgCoord(object):\n    def __init__(self, name):\n        self.name = name\n        self.coord_buffer = {}\n\n    def key(self, img_h, img_w):\n        return f'{img_h}_{img_w}'\n\n    def __getitem__(self, img_shape):\n        batch_size, _, img_h, img_w = img_shape\n        return self.get_coord(img_h, img_w, batch_size)\n\n    def get_coord(self, img_h, img_w, batch_size=None):\n        key = self.key(img_h, img_w)\n        if key not in self.coord_buffer:\n            self.coord_buffer[key] = coord = self.create_coord(img_h, img_w)\n        else:\n            coord = self.coord_buffer[key]\n        if batch_size is None:\n            return coord\n        else:\n            return coord.expand(batch_size, -1, -1, -1)\n\n    def create_coord(self, img_h, img_w):\n        h_coord = torch.linspace(-1, 1, steps=img_h).unsqueeze(1).expand(1, img_h, img_w)\n        w_coord = torch.linspace(-1, 1, steps=img_w).unsqueeze(0).expand(1, img_h, img_w)\n        return torch.cat((w_coord, h_coord))\n\n    def __call__(self, x):\n        return self[x.shape].type_as(x)\n\n    # def create_coord(self, img_h, img_w):\n    #     h_coord = torch.linspace(-1, 1, steps=img_h).unsqueeze(1).expand(1, img_h, img_w)\n    #     w_coord = torch.linspace(-1, 1, steps=img_w).unsqueeze(0).expand(1, img_h, img_w)\n    #     dist = torch.sqrt(h_coord**2 + w_coord**2)\n    #     theta = torch.atan2(h_coord, w_coord)\n    #     return torch.cat((dist, h_coord))\n\n    # def create_coord(self, img_h, img_w):\n    #     h_coord = torch.linspace(0, 1, steps=img_h).unsqueeze(1).expand(1, img_h, img_w)\n    #     w_coord = torch.linspace(0, 1, steps=img_w).unsqueeze(0).expand(1, img_h, img_w)\n    #     return torch.cat((w_coord, h_coord))\n\n\nimg_coord = ImgCoord('default')\n\n\nclass CoordLayer(nn.Module):\n\n    def __init__(self):\n        super().__init__()\n        self.layers = nn.Sequential(\n            nn.Conv2d(2, 10, 3, padding=1),\n            # nn.BatchNorm2d(10),\n            nn.ReLU(inplace=True),\n        )\n\n    def forward(self, x):\n        x_coord = img_coord(x)\n        x_coord = self.layers(x_coord)\n        return x_coord\n\n\nclass CoordLayerNew(nn.Module):\n    def __init__(self, embed_num=196, embed_dim=10):\n        super().__init__()\n        self.embed_l = nn.Embedding(embed_num, embed_dim)\n        self.embed_dim = embed_dim\n\n    def forward(self, x):\n        b, c, h, w = x.shape\n        x_coord = torch.linspace(0, h*w-1, h*w).unsqueeze(0).repeat(b, 1).type_as(x).long()\n        x_coord = self.embed_l(x_coord).view(b, h, w, 
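The point-in-polygon assignment above relies on matplotlib's Path.contains_points. A condensed sketch, with a hypothetical unit square standing in for the arrondissement geometry:
import numpy as np
import matplotlib.path as mplpath

# a square polygon; Path treats it as closed for containment tests
square = mplpath.Path(np.array([[0, 0], [1, 0], [1, 1], [0, 1]]))
pts = np.array([[0.5, 0.5], [2.0, 2.0]])
print(square.contains_points(pts))  # [ True False]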
self.embed_dim).permute(0, 3, 1, 2)\n return x_coord\n\n\nlearned_layers = {}\n\n\nclass Coord(Layer):\n def __init__(self, type='default'):\n super().__init__()\n # assert type in ('learn', 'default')\n self.type = type\n if 'learn' in type:\n feat_h = int(type.split('-')[-1])\n global learned_layers\n if feat_h in learned_layers:\n self.layer = learned_layers[feat_h]\n else:\n self.layer = CoordLayerNew(embed_num=feat_h*feat_h)\n learned_layers[feat_h] = self.layer\n else:\n self.layer = img_coord\n\n @property\n def out_dim(self):\n return 2 if self.type == 'default' else 10\n\n def forward(self, x):\n out = torch.cat((x, self.layer(x)), dim=1)\n return out\n\n def build(cls, params, cls_name=None, sub_cls=None):\n return cls()","sub_path":"pt_pack/modules/layers/base_layers/coord.py","file_name":"coord.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"510997802","text":"import os\n\n\nfrom flask import Flask, redirect, url_for, render_template, session, request\nfrom flask_seasurf import SeaSurf\nimport sentry_sdk\nfrom sentry_sdk.integrations.flask import FlaskIntegration\n\n\nfrom sfa_dash.blueprints.auth0 import (make_auth0_blueprint,\n oauth_request_session)\nfrom sfa_dash.database import db, session_storage\nfrom sfa_dash.filters import register_jinja_filters\nfrom sfa_dash.template_globals import template_variables\nfrom sfa_dash import error_handlers\n\n\ndef create_app(config=None):\n sentry_sdk.init(send_default_pii=False,\n integrations=[FlaskIntegration()])\n\n app = Flask(__name__)\n config = config or 'sfa_dash.config.DevConfig'\n app.config.from_object(config)\n app.secret_key = app.config['SECRET_KEY']\n SeaSurf(app)\n register_jinja_filters(app)\n error_handlers.register_handlers(app)\n\n if app.config['SQLALCHEMY_DATABASE_URI']:\n db.init_app(app)\n db.create_all(app=app)\n\n make_auth0_blueprint(\n app,\n base_url=app.config['AUTH0_OAUTH_BASE_URL'],\n storage=session_storage)\n\n def protect_endpoint():\n try:\n authorized = oauth_request_session.authorized\n except ValueError:\n # no token set for user/no user set\n authorized = False\n\n # authorized == True means we have a token, not necessarily that it\n # hasn't expired, but refreshing is handled\n # by request_oauthlib and oauthlib\n # and the api validates expiration\n if not authorized:\n session['redirect_path'] = request.path\n return redirect(url_for('auth0.login'))\n\n @app.route('/')\n def index():\n # move index to app so all blueprints are secured\n # should probably test if authorized and show one\n # page, show a different page w/ login link otherwise\n return render_template('index.html')\n\n @app.route('/documentation/')\n def documentation():\n return render_template('documentation.html')\n\n @app.route('/changelog/')\n def changelog():\n return render_template('changelog.html')\n\n @app.context_processor\n def inject_globals():\n # Injects variables into all rendered templates\n global_template_args = {}\n global_template_args['user'] = session.get('userinfo')\n global_template_args.update(template_variables())\n return global_template_args\n\n @app.errorhandler(500)\n def server_error_handler(error):\n return render_template(\n \"500.html\",\n sentry_event_id=sentry_sdk.last_event_id(),\n dsn=os.getenv('SENTRY_DSN', '')), 500\n\n from sfa_dash.blueprints.main import data_dash_blp\n from sfa_dash.blueprints.form import forms_blp\n from sfa_dash.blueprints.admin import admin_blp\n\n for blp in 
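The normalized coordinate grid that ImgCoord builds and caches above can be reproduced standalone; a minimal sketch with illustrative shapes (coord_channels is a hypothetical helper, not part of the module):
import torch

def coord_channels(batch, h, w):
    # y runs -1..1 down the rows, x runs -1..1 across the columns
    ys = torch.linspace(-1, 1, steps=h).view(h, 1).expand(1, h, w)
    xs = torch.linspace(-1, 1, steps=w).view(1, w).expand(1, h, w)
    return torch.cat((xs, ys)).expand(batch, -1, -1, -1)  # (batch, 2, h, w)

x = torch.randn(2, 3, 4, 5)
x = torch.cat((x, coord_channels(2, 4, 5)), dim=1)
print(x.shape)  # torch.Size([2, 5, 4, 5])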
(data_dash_blp, forms_blp, admin_blp):\n        blp.before_request(protect_endpoint)\n        app.register_blueprint(blp)\n    return app\n\n\ndef create_app_with_metrics(config=None):  # pragma: no cover # NOQA\n    from prometheus_flask_exporter.multiprocess import (\n        GunicornPrometheusMetrics)\n    app = create_app(config)\n    GunicornPrometheusMetrics(app=app, group_by='url_rule')\n    return app\n\n\nif __name__ == '__main__':\n    app = create_app()\n    app.run()\n","sub_path":"sfa_dash/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"92475889","text":"\"\"\"\nDocumentation:\nhttps://docs.python.org/3/tutorial/modules.html\n\n# Imports everything inside calculos\n\nimport calculos\n\nprint(__name__) # to the system, this module's name is __main__\nprint(calculos.PI)\n\nlista = [2, 4]\nprint(calculos.multiplica(lista))\n\n# Importing only the multiplica function\nfrom calculos import multiplica\nprint(multiplica([2, 4]))\n\n\"\"\"\n\nfrom calculos import multiplica, dobra_lista, PI\nfrom outro import fala_oi\n\nprint(multiplica([2, 4]))\nfala_oi()\n\n\nprint(PI)\n\nlista = [2, 4, 5, 6, 7]\nnova_lista = []\nprint(dobra_lista(lista))\n\nfor i in dobra_lista(lista):\n    nova_lista.append(i * PI)\nprint(nova_lista)\n","sub_path":"Curso_de_Python_3_do Basico_ao_Avancado_com_projetos_reais/Aulas/Aula87_criando_modulos/criando_modulos.py","file_name":"criando_modulos.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"335787094","text":"#!/usr/bin/env python3\n#\n# jacoco-badge-generator: Github action for generating a jacoco coverage\n# percentage badge.\n# \n# Copyright (c) 2020-2021 Vincent A Cicirello\n# https://www.cicirello.org/\n#\n# MIT License\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
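The blueprint-level guard wired up above (blp.before_request(protect_endpoint)) is a standard Flask pattern; a minimal self-contained sketch, with a hypothetical blueprint and session check:
from flask import Flask, Blueprint, session, redirect, url_for

bp = Blueprint('private', __name__)

@bp.route('/secret')
def secret():
    return 'only for logged-in users'

def protect():
    # runs before every request handled by this blueprint
    if 'userinfo' not in session:
        return redirect(url_for('login'))

bp.before_request(protect)

app = Flask(__name__)
app.secret_key = 'dev'

@app.route('/login')
def login():
    return 'please log in'

app.register_blueprint(bp)

client = app.test_client()
print(client.get('/secret').status_code)  # 302: redirected to /login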
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n# \n\nimport csv\nimport sys\nimport math\nimport pathlib\nimport os\nimport os.path\n\nbadgeTemplate = '\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n{3}\\\n{3}\\\n{0}{0}\\\n'\n\ncolors = [ \"#4c1\", \"#97ca00\", \"#a4a61d\", \"#dfb317\", \"#fe7d37\", \"#e05d44\" ]\n\ndef generateBadge(covStr, color, badgeType=\"coverage\") :\n \"\"\"Generates the badge as a string.\n\n Keyword arguments:\n covStr - The coverage as a string.\n color - The color for the badge.\n badgeType - The text string for a label on the badge.\n \"\"\"\n if len(covStr) >= 4 :\n textLength = \"330\"\n elif len(covStr) >= 3 :\n textLength = \"250\" \n else :\n textLength = \"170\"\n return badgeTemplate.format(covStr, color, textLength, badgeType)\n\ndef computeCoverage(fileList) :\n \"\"\"Parses one or more jacoco.csv files and computes code coverage\n percentages. Returns: coverage, branchCoverage. The coverage\n is instruction coverage.\n\n Keyword arguments:\n fileList - A list (or any iterable) of the filenames, including path, of the jacoco.csv files.\n \"\"\"\n missed = 0\n covered = 0\n missedBranches = 0\n coveredBranches = 0\n for filename in fileList :\n with open(filename, newline='') as csvfile :\n jacocoReader = csv.reader(csvfile)\n for i, row in enumerate(jacocoReader) :\n if i > 0 :\n missed += int(row[3])\n covered += int(row[4])\n missedBranches += int(row[5])\n coveredBranches += int(row[6])\n return (calculatePercentage(covered, missed),\n calculatePercentage(coveredBranches, missedBranches))\n\ndef calculatePercentage(covered, missed) :\n \"\"\"Calculates the coverage percentage from number of\n covered and number of missed. 
Returns 1 if both are 0\n to handle the special case of running on an empty class\n (no instructions) or a case with no if, switch, loops (no\n branches).\n\n Keyword arguments:\n covered - The number of X covered (where X is the metric).\n missed - The number of X missed (where X is the metric).\n \"\"\"\n if missed == 0 :\n return 1\n return covered / (covered + missed)\n\ndef badgeCoverageStringColorPair(coverage) :\n \"\"\"Converts the coverage percentage to a formatted string,\n and determines the badge color.\n Returns: coveragePercentageAsString, colorAsString\n\n Keyword arguments:\n coverage - The coverage percentage.\n \"\"\"\n # Truncate the 2nd decimal place, rather than rounding\n # to avoid considering a non-passing percentage as\n # passing (e.g., if user considers 70% as passing threshold,\n # then 69.99999...% is technically not passing).\n coverage = int(1000 * coverage) / 10\n c = math.ceil((100 - coverage) / 10)\n if c >= len(colors) :\n c = len(colors) - 1\n if coverage - int(coverage) == 0 :\n cov = \"{0:d}%\".format(int(coverage))\n else :\n cov = \"{0:.1f}%\".format(coverage)\n return cov, colors[c]\n\ndef createOutputDirectories(badgesDirectory) :\n \"\"\"Creates the output directory if it doesn't already exist.\n\n Keyword arguments:\n badgesDirectory - The badges directory\n \"\"\"\n if not os.path.exists(badgesDirectory) :\n p = pathlib.Path(badgesDirectory)\n os.umask(0)\n p.mkdir(mode=0o777, parents=True, exist_ok=True)\n\ndef splitPath(filenameWithPath) :\n \"\"\"Breaks a filename including path into containing directory and filename.\n\n Keyword arguments:\n filenameWithPath - The filename including path.\n \"\"\"\n if filenameWithPath.startswith(\"./\") :\n filenameWithPath = filenameWithPath[2:]\n if filenameWithPath[0] == \"/\" :\n filenameWithPath = filenameWithPath[1:]\n i = filenameWithPath.rfind(\"/\")\n if i >= 0 :\n return filenameWithPath[:i], filenameWithPath[i+1:]\n else :\n return \".\", filenameWithPath\n\ndef formFullPathToFile(directory, filename) :\n \"\"\"Generates path string.\n\n Keyword arguments:\n directory - The directory for the badges\n filename - The filename for the badge.\n \"\"\"\n if len(filename) > 1 and filename[0:2] == \"./\" :\n filename = filename[2:]\n if filename[0] == \"/\" :\n filename = filename[1:]\n if len(directory) > 1 and directory[0:2] == \"./\" :\n directory = directory[2:]\n if len(directory) > 0 and directory[0] == \"/\" :\n directory = directory[1:]\n if directory == \"\" or directory == \".\" :\n return filename\n elif directory[-1] == \"/\" :\n return directory + filename\n else :\n return directory + \"/\" + filename\n\nif __name__ == \"__main__\" :\n jacocoCsvFile = sys.argv[1]\n badgesDirectory = sys.argv[2]\n coverageFilename = sys.argv[3]\n branchesFilename = sys.argv[4]\n generateCoverageBadge = sys.argv[5].lower() == \"true\"\n generateBranchesBadge = sys.argv[6].lower() == \"true\"\n\n if len(badgesDirectory) > 1 and badgesDirectory[0:2] == \"./\" :\n badgesDirectory = badgesDirectory[2:]\n if len(badgesDirectory) > 0 and badgesDirectory[0] == \"/\" :\n badgesDirectory = badgesDirectory[1:]\n if badgesDirectory == \".\" :\n badgesDirectory = \"\"\n\n jacocoFileList = jacocoCsvFile.split()\n\n cov, branches = computeCoverage(jacocoFileList)\n\n if (generateCoverageBadge or generateBranchesBadge) and badgesDirectory != \"\" :\n createOutputDirectories(badgesDirectory)\n\n if generateCoverageBadge :\n covStr, color = badgeCoverageStringColorPair(cov)\n with open(formFullPathToFile(badgesDirectory, 
coverageFilename), \"w\") as badge :\n badge.write(generateBadge(covStr, color))\n\n if generateBranchesBadge :\n covStr, color = badgeCoverageStringColorPair(branches)\n with open(formFullPathToFile(badgesDirectory, branchesFilename), \"w\") as badge :\n badge.write(generateBadge(covStr, color, \"branches\"))\n\n print(\"::set-output name=coverage::\" + str(cov))\n print(\"::set-output name=branches::\" + str(branches))\n \n\n\n \n \n","sub_path":"JacocoBadgeGenerator.py","file_name":"JacocoBadgeGenerator.py","file_ext":"py","file_size_in_byte":8288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"46223269","text":"def binary_search(arr, low, high, searchData):\n if high >= low:\n mid = (high + low)//2\n if arr[mid] == searchData:\n return mid\n elif arr[mid] > searchData:\n return binary_search(arr, low, mid - 1, searchData)\n else: \n return binary_search(arr, mid + 1, high, searchData)\n else:\n return -1\n\n\n\n\nlistData = [1, 10, 20, 30, 40, 45, 50, 60, 67, 69, 70, 80]\n\nreturnindex = binary_search(listData, 0, len(listData)-1, 62)\n\nprint(f\"Value Foundd at index {returnindex}\")","sub_path":"Python_Training/binarySearch.py","file_name":"binarySearch.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"456469426","text":"\n\n# import sys\n\n# n = int(input())\n# numbers = map(int, input().split())\n# window = int(input())\n\n# n = 8\n# numbers = list(map(int, '2 7 3 1 5 2 6 2'.split()))\n# window = 4\n\n\nn = 3\nnumbers = list(map(int, '2 1 5'.split()))\nwindow = 1\n\nin_stack = []\nout_stack = []\n\n\nfor number in numbers:\n if len(in_stack) < window and not out_stack:\n in_stack_max = number if not in_stack or number >= in_stack[-1][1] else in_stack[-1][1]\n in_stack.append((number, in_stack_max))\n\n if len(in_stack) == window and not out_stack:\n\n for __ in range(window):\n poped = in_stack.pop()\n out_stack_max = poped[0] if not out_stack or poped[0] >= out_stack[-1][1] else out_stack[-1][1]\n out_stack.append((poped[0], out_stack_max))\n\n print(out_stack.pop()[1])\n\n elif out_stack:\n in_stack_max = number if not in_stack or number >= in_stack[-1][1] else in_stack[-1][1]\n in_stack.append((number, in_stack_max))\n\n print(max(out_stack.pop()[1], in_stack[-1][1]))\n\n\n\n\n# print(current_max)\n#\n# for number in numbers[window:]:\n# if current_stack.pop(0)\n# current_max = current_max if current_max > number else number\n# print(current_max)\n\n\n\n\n \n\n\n","sub_path":"stepik_algorithm_data_structs/1_2_5_sliding_window.py","file_name":"1_2_5_sliding_window.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"341169085","text":"#!/usr/bin/env python\n\n# Import required libraries\nimport sys\nimport time\nimport RPi.GPIO as GPIO\n\n# Disable GPIO warnings\nGPIO.setwarnings(False)\n\n# Use BOARd GPIO references\n# instead of physical pin numbers\nGPIO.setmode(GPIO.BOARD)\n\n# Define GPIO signals to use\n# Physical pins 11,15,16,18\n# GPIO17,GPIO18,GPIO27,GPIO22\nStepPins = [11,15,16,18]\n\n# Initialise variables\nStepCounter = 0\n\n# Setup magnet input pin\nGPIO.setup(40, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n\n# Read wait time from command line\nWaitTime = 1/float(1000)\n\ndef clearPins():\n\t# Set all pins as output\n\tfor pin in StepPins:\n\t GPIO.setup(pin,GPIO.OUT)\n\t GPIO.output(pin, False)\n\t\ndef runMotor():\n\tglobal 
StepCounter\n\t# Define advanced sequence\n\t# as shown in manufacturers datasheet\n\tSeq = [[1,0,0,0],\n\t [1,1,0,0],\n\t [0,1,0,0],\n\t [0,1,1,0],\n\t [0,0,1,0],\n\t [0,0,1,1],\n\t [0,0,0,1],\n\t [1,0,0,1]]\n\n\tStepCount = len(Seq)-1\n\tStepDir = 1 # Set to 1 or 2 for clockwise\n\t\t # Set to -1 or -2 for anti-clockwise\n\n\n\tfor pin in range(0, 4):\n\t xpin = StepPins[pin]\n\t if Seq[StepCounter][pin]!=0:\n\t GPIO.output(xpin, True)\n\t else:\n\t GPIO.output(xpin, False)\n\n\tStepCounter += StepDir\n\t# If we reach the end of the sequence\n\t# start again\n\tif (StepCounter>=StepCount):\n\t StepCounter = 0\n\tif (StepCounter<0):\n\t StepCounter = StepCount\n\ndef readMagnet():\n\treturn GPIO.input(40)\n\nclearPins()\nwhile readMagnet():\n\trunMotor()\n\ttime.sleep(WaitTime)\nclearPins()\n","sub_path":"back/programs/motor/magnet_with_motor.py","file_name":"magnet_with_motor.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"277547800","text":"import torch\nfrom torch.autograd import Variable\nfrom torch.autograd import Function\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom abc import ABCMeta, abstractmethod\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport os\nfrom torch.utils.data import Dataset\nimport time\nimport pickle as pkl\nfrom torch import nn\nimport torch.optim as optim\nimport torch.nn.init\nimport math\nimport pickle\nimport Resnet18\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '7'\ndevice = Resnet18.device\n\n\ndef pgd_attack(model, images, labels, eps=8 / 255, alpha=2 / 255, iters=10):\n images = images.to(device)\n labels = labels.to(device)\n loss = nn.CrossEntropyLoss()\n\n ori_images = images.data\n\n for i in range(iters):\n images.requires_grad = True\n outputs = model(images)\n\n model.zero_grad()\n cost = loss(outputs, labels).to(device)\n cost.backward()\n\n adv_images = images + alpha * images.grad.sign()\n eta = torch.clamp(adv_images - ori_images, min=-eps, max=eps)\n images = torch.clamp(ori_images + eta, min=0, max=1).detach_()\n\n return images\n\n\ndef fgsm_attack(model, images, labels, eps=0.1, alpha=0.1, iters=1):\n images = images.to(device)\n labels = labels.to(device)\n loss = nn.CrossEntropyLoss()\n\n ori_images = images.data\n\n for i in range(iters):\n images.requires_grad = True\n outputs = model(images)\n\n model.zero_grad()\n cost = loss(outputs, labels).to(device)\n cost.backward()\n\n adv_images = images + alpha * images.grad.sign()\n eta = torch.clamp(adv_images - ori_images, min=-eps, max=eps)\n images = torch.clamp(ori_images + eta, min=0, max=1).detach_()\n\n return images\n\n\ndef lbfgs_attack(model, images, labels, eps=8 / 255, alpha=0.1, iters=20):\n images = images.to(device)\n # labels = labels.to(device)\n lossfunc = nn.CrossEntropyLoss()\n\n ori_images = images.data\n\n xv = nn.Parameter(images, requires_grad=True)\n # xv = nn.Parameter(torch.FloatTensor(x.reshape(1, 28 * 28)), requires_grad=True)\n y_true = Variable(torch.LongTensor(labels.cpu()).to(device), requires_grad=False)\n method = optim.LBFGS([xv], lr=5e-2, max_iter=iters)\n # Classification before Adv\n y_pred = torch.argmax(model(xv), dim=-1)\n\n # Generate Adversarial Image\n def closure():\n method.zero_grad()\n output = model(xv)\n loss = -lossfunc(output, y_true)\n loss.backward()\n return loss\n\n method.step(closure)\n # method = optim.LBFGS(list(xv), lr=1e-1)\n # Add 
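The 8-entry table in runMotor above is the standard half-step sequence for a unipolar stepper. A GPIO-free sketch that just cycles and prints the coil pattern (purely illustrative, no RPi hardware assumed):
import itertools

SEQ = [[1,0,0,0],[1,1,0,0],[0,1,0,0],[0,1,1,0],
       [0,0,1,0],[0,0,1,1],[0,0,0,1],[1,0,0,1]]

def steps(direction=1):
    """Yield coil states forever; direction=1 is CW, -1 is CCW."""
    i = 0
    while True:
        yield SEQ[i]
        i = (i + direction) % len(SEQ)

for state in itertools.islice(steps(), 10):
    print(state)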
perturbation\n # x_grad = torch.sign(x.grad.data)\n x_adversarial = torch.clamp(xv.data, 0, 1)\n\n return x_adversarial\n\n\nif __name__ == '__main__':\n import Resnet18\n\n resnet18A_model = [Resnet18.getResNet18PlusA, Resnet18.getResNet18NA]\n\n resnet18A_name = ['Resnet18.getResNet18PlusA', 'Resnet18.getResNet18NA']\n\n resnet18FC_model = [Resnet18.getResNet18, Resnet18.getResNet18PlusFC, Resnet18.getResNet18NFC]\n\n resnet18FC_name = ['Resnet18.getResNet18', 'Resnet18.getResNet18PlusFC', 'Resnet18.getResNet18NFC']\n model_zoo_name = [resnet18FC_name, resnet18A_name]\n noise = ['FGSM', 'LBFGS', 'PGD']\n noise_method = [fgsm_attack, lbfgs_attack, pgd_attack]\n\n strength = [1, 2, 3, 4, 5]\n model_zoo = [resnet18FC_model, resnet18A_model]\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4), # 先四周填充0,在吧图像随机裁剪成32*32\n transforms.RandomHorizontalFlip(), # 图像一半的概率翻转,一半的概率不翻转\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)), # R,G,B每层的归一化用到的均值和方差\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n batch_size = 128\n epoches = 50\n loss = 0.\n train_dataset = datasets.CIFAR10(root='../../LR/EBP', train=True, transform=transform_train, download=True)\n test_dataset = datasets.CIFAR10(root='../../LR/EBP', train=False, transform=transform_test, download=True)\n train_dataloader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n test_dataloader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)\n epsilon = 0.1\n info = dict()\n\n for i in range(len(model_zoo)):\n for j in range(len(model_zoo[i])):\n # print('new model training:{}'.format(model_zoo_name[i][j]))\n dense_base_acc = [0. for _ in range(len(resnet18FC_model))]\n cnn_base_acc = [0. for _ in range(len(resnet18A_model))]\n base_acc = [dense_base_acc, cnn_base_acc]\n adv_acc_hot = [0. 
for _ in range(len(noise))]\n dense_adv_acc = [adv_acc_hot for _ in range(len(resnet18FC_model))]\n cnn_adv_acc = [adv_acc_hot for _ in range(len(resnet18A_model))]\n adv_acc = [dense_adv_acc, cnn_adv_acc]\n model = model_zoo[i][j]()\n static = torch.load('./mnist_model/{}.pth'.format(model_zoo_name[i][j]), map_location='cpu')\n model.load_state_dict(static)\n model = model.to(device)\n model.eval()\n for noise_kind in range(len(noise)):\n N = 0.\n tmp_base_acc = 0.\n tmp_adv_acc = 0.\n for batch, [inputs, labels] in tqdm(enumerate(test_dataloader)):\n N += len(inputs)\n\n inputs = Variable(torch.FloatTensor(inputs).to(device), requires_grad=True)\n labels = Variable(torch.LongTensor(labels).to(device), requires_grad=False)\n base_outputs = model(inputs)\n base_predict = torch.argmax(base_outputs, dim=-1)\n tmp_base_acc = np.sum((base_predict.cpu().numpy() == labels.cpu().numpy()))\n base_acc[i][j] += tmp_base_acc\n\n x_adversarial = noise_method[noise_kind](model, inputs, labels)\n\n # Classification after optimization\n y_pred_adversarial = torch.argmax(model(x_adversarial), dim=-1)\n adv_acc[i][j][noise_kind] += np.sum(y_pred_adversarial.cpu().numpy() == labels.cpu().numpy())\n adv_acc[i][j][noise_kind] /= N\n base_acc[i][j] /= N\n\n info[model_zoo_name[i][j]] = {\n 'base acc': base_acc[i][j],\n 'adv acc': adv_acc[i][j]\n }\n for model_name in info.keys():\n print(model_name)\n print(info[model_name])\n","sub_path":"NoiseOptimizationInANN/Cifar10/WhiteEval.py","file_name":"WhiteEval.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"581557252","text":"#!/usr/bin/env python\n\nimport os\nimport subprocess\nfrom subprocess import PIPE, STDOUT, Popen, call\nfrom django.contrib.sitemaps import ping_google\n\nclass StartServer:\n def __init__(self, production=os.environ.get('PRODUCTION')):\n if production == 'False' or production == 'false':\n call(['python', 'manage.py', 'runserver', '0.0.0.0:8000'], stderr=STDOUT)\n\n elif production == 'True' or production == 'true':\n call(['gunicorn', 'blank_project.wsgi', '--bind=0.0.0.0:8000', '--workers=4', \\\n '--chdir=/code/blank_project', '--log-file=-'], stderr=STDOUT)\n\n @staticmethod\n def send_ping():\n ping_google()\n\nStartServer()\n","sub_path":"blank_project/docker/_tests/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"50353891","text":"'''\r\nCreated on 27.08.2018\r\n\r\n@author: FM\r\n'''\r\nimport unittest\r\nimport unittest.mock as mock\r\nfrom FileSet import FileSet\r\nfrom test.testing_tools import mock_assert_msg, mock_assert_many_msg\r\n\r\n\r\nmock_rename = mock.MagicMock()\r\n\r\n@mock.patch('FileSet.rename', new=mock_rename)\r\nclass ChangeIndexTests(unittest.TestCase):\r\n \r\n @classmethod\r\n def setUpClass(cls):\r\n cls.pattern = ('test (', ')')\r\n \r\n def tearDown(self):\r\n mock_rename.reset_mock()\r\n \r\n \r\n def test_middle_to_end(self):\r\n \"\"\"The FileSet should be able to change the index of a file to a free position at the end of the set.\"\"\"\r\n test_files = ['test (0).jpg', 'test (1).jpg', 'test (2).jpg', 'test (3).jpg']\r\n \r\n test_set = FileSet(self.pattern, test_files)\r\n test_set.change_index(1, 4)\r\n \r\n result_files = {0: ['jpg'], 2: ['jpg'], 3: ['jpg'], 4: ['jpg']}\r\n mock_assert_msg(mock_rename.assert_called_with, ['test (1).jpg', 'test (4).jpg'], msg=\"The FileSet 
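With iters=1, fgsm_attack above collapses to a single gradient-sign step. A self-contained sketch of that update on a throwaway linear model (model, data, and eps are hypothetical):
import torch
import torch.nn as nn

torch.manual_seed(0)
model = nn.Linear(4, 2)                      # stand-in classifier
x = torch.rand(1, 4, requires_grad=True)
y = torch.tensor([1])

loss = nn.CrossEntropyLoss()(model(x), y)
loss.backward()                              # populates x.grad

eps = 0.1
x_adv = (x + eps * x.grad.sign()).clamp(0, 1).detach()
print((x_adv - x).abs().max())               # perturbation bounded by eps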
doesn't actually rename the file properly.\")\r\n self.assertEqual(test_set.files, result_files, \"The FileSet fails to change a file's index.\")\r\n self.assertEqual(test_set.max_index, 4, \"The FileSet fails to update its max_index after a change_index operation that makes it necessary.\")\r\n \r\n def test_middle_to_middle(self):\r\n \"\"\"The FileSet should be able to change the index of a file to a free position at the beginning of the set.\"\"\"\r\n test_files = ['test (0).jpg', 'test (1).jpg', 'test (3).jpg', 'test (4).jpg']\r\n \r\n test_set = FileSet(self.pattern, test_files)\r\n test_set.change_index(1, 2)\r\n \r\n result_files = {0: ['jpg'], 2: ['jpg'], 3: ['jpg'], 4: ['jpg']}\r\n mock_assert_msg(mock_rename.assert_called_with, ['test (1).jpg', 'test (2).jpg'], msg=\"The FileSet doesn't rename the file properly.\")\r\n self.assertEqual(test_set.files, result_files, \"The FileSet fails to change a file's index in the middle of the set.\")\r\n \r\n def test_multi_assigned_index(self):\r\n \"\"\"The FileSet should be able to change the index of files even if this index is multi-assigned.\"\"\"\r\n test_files = ['test (0).jpg', 'test (1).jpg', 'test (2).jpg', 'test (2).png', 'test (3).jpg'] \r\n \r\n test_set = FileSet(self.pattern, test_files)\r\n test_set.change_index(2, 4)\r\n \r\n result_files = {0: ['jpg'], 1: ['jpg'], 3: ['jpg'], 4: ['jpg', 'png']}\r\n \r\n assertion_calls = [\r\n (mock_rename.assert_any_call , ['test (2).jpg', 'test (4).jpg']),\r\n (mock_rename.assert_any_call , ['test (2).png', 'test (4).png'])\r\n ]\r\n mock_assert_many_msg(assertion_calls, \"The FileSet does not correctly rename both of the files of the multi-assigned index.\")\r\n self.assertEqual(test_set.files, result_files, \"The FileSet fails to operate correctly when dealing with a multi-assigned index.\")\r\n \r\n def test_index_unassigned_far_up(self):\r\n \"\"\"The FileSet should recognize an unassigned index that's far out of bounds of the set and raise an error.\"\"\"\r\n test_files = ['test (0).jpg', 'test (1).jpg', 'test (2).jpg', 'test (2).png', 'test (3).jpg'] \r\n test_set = FileSet(self.pattern, test_files)\r\n \r\n with self.assertRaises(FileSet.IndexUnassignedError, msg=\"The FilsSet fails to stop when the given file's index does not exist.\"):\r\n test_set.change_index(12, 2)\r\n \r\n def test_index_unassigned_inside(self):\r\n \"\"\"The FileSet should recognize an unassigned index within the set and raise an error.\"\"\"\r\n test_files = ['test (0).jpg', 'test (3).jpg'] \r\n test_set = FileSet(self.pattern, test_files)\r\n \r\n with self.assertRaises(FileSet.IndexUnassignedError, msg=\"The FilsSet fails to stop when the given file's index does not exist.\"):\r\n test_set.change_index(1, 2)\r\n \r\n def test_to_index_assigned(self):\r\n \"\"\"The FileSet should recognize when an index the file is supposed to be moved to is already assigned and raise an error.\"\"\"\r\n test_files = ['test (0).jpg', 'test (3).jpg'] \r\n test_set = FileSet(self.pattern, test_files)\r\n \r\n with self.assertRaises(FileSet.IndexAssignedError, msg=\"The FileSet fails to stop when a file's index is attempted to be set to an already assigned one.\"):\r\n test_set.change_index(0, 3)\r\n \r\n def test_from_to_same_index(self):\r\n \"\"\"The FileSet should do nothing when a file is requested to be moved to the same index it already has.\"\"\"\r\n test_files = ['test (0).jpg', 'test (1).jpg', 'test (2).jpg', 'test (3).jpg']\r\n \r\n test_set = FileSet(self.pattern, test_files)\r\n try:\r\n test_set.change_index(1, 
1)\r\n except FileSet.IndexAssignedError:\r\n self.fail(\"The FileSet doesn't recognize when an index is changed to itself, in which case nothing should happen.\")\r\n \r\n result_files = {0: ['jpg'], 1: ['jpg'], 2: ['jpg'], 3: ['jpg']}\r\n mock_assert_msg(mock_rename.assert_not_called, [], msg=\"The FileSet tries to rename a file that's already in the right place.\")\r\n self.assertEqual(test_set.files, result_files, \"The FileSet unnecessarily changes the file's index.\")\r\n self.assertEqual(test_set.max_index, 3, \"The FileSet unnecessarily changes the max_index.\")\r\n \r\n def test_multi_assigned_specify_specific_type(self):\r\n \"\"\"The FileSet should be able to only move the file with the given file_type when dealing with a multi-assigned index.\"\"\" \r\n test_files = ['test (0).jpg', 'test (1).jpg', 'test (2).jpg', 'test (2).png', 'test (3).jpg'] \r\n \r\n test_set = FileSet(self.pattern, test_files)\r\n test_set.change_index(2, 4, 'png')\r\n \r\n result_files = {0: ['jpg'], 1: ['jpg'], 2: ['jpg'], 3: ['jpg'], 4: ['png']}\r\n \r\n assertion_calls = [\r\n (mock_rename.assert_called_once_with , ['test (2).png', 'test (4).png'])\r\n ]\r\n mock_assert_many_msg(assertion_calls, \"The FileSet does not correctly rename only the single files of the multi-assigned index when the file type is stated.\")\r\n self.assertEqual(test_set.files, result_files, \"The FileSet fails to update its logical file list accordingly when only moving a single type of the multi-assigned index.\")\r\n self.assertEqual(test_set.max_index, 4, \"The FileSet fails to update its max_index when only moving one type of a multi-assigned index.\")\r\n \r\n def test_multi_assigned_specified_type_does_not_exist(self):\r\n \"\"\"The FileSet should recognize when a requested file_type does not exist in the (multi-)assigned index.\"\"\"\r\n test_files = ['test (0).jpg', 'test (1).jpg', 'test (2).jpg', 'test (3).jpg'] \r\n \r\n test_set = FileSet(self.pattern, test_files)\r\n with self.assertRaises(FileSet.TypeUnassignedError, msg=\"The FileSet fails to recognize when a type to be moved is not actually assigned.\"):\r\n test_set.change_index(2, 4, 'png')\r\n \r\n def test_change_max_index_downwards(self):\r\n \"\"\"The FileSet should recognize and update the max index correctly if it is moved downwards.\"\"\"\r\n test_files = ['test (0).jpg', 'test (2).jpg', 'test (3).jpg']\r\n \r\n test_set = FileSet(self.pattern, test_files)\r\n test_set.change_index(3, 1)\r\n \r\n self.assertEqual(test_set.max_index, 2, \"The FileSet fails to update to the correct max index.\")\r\n\r\nif __name__ == \"__main__\":\r\n #import sys;sys.argv = ['', 'Test.testName']\r\n unittest.main()","sub_path":"src/test/FileSet_tests/low_level_tests/test_change_index.py","file_name":"test_change_index.py","file_ext":"py","file_size_in_byte":7692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"603832256","text":"from __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\n\nimport logging\n\nfrom .node import Node, ProducerNode, ProcessorNode, ConsumerNode\n\nSTOP_SIGNAL = \"alalsl;'sdlfj2389jdalskmghsaiaw98y8asdf;askljoa8y;dsf;lkasdb\"\n\nlogger = logging.getLogger(__package__)\n\nclass Task:\n '''\n A ``Task`` is a wrapper around a ``videoflow.core.node.Node`` that \\\n is able to interact with the execution environment through a messenger. 
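These tests patch FileSet.rename at module level and assert on the recorded calls. A stripped-down sketch of that pattern, with os.rename standing in for the patched target:
import os
from unittest import mock

with mock.patch('os.rename') as fake_rename:
    os.rename('old (1).jpg', 'old (2).jpg')   # intercepted; nothing touches disk
    fake_rename.assert_called_once_with('old (1).jpg', 'old (2).jpg')
print('call recorded:', fake_rename.call_args)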
\\\n Nodes receive input and/or produce output, but tasks are the ones \\\n that run in infinite loops, receiving inputs from the environment and passing them to the \\\n computation node, and taking outputs from the computation node and passing \\\n them to the environment.\n '''\n def __init__(self, computation_node : Node, task_id : int, parent_task_id : int = None):\n self._messenger = None\n self._computation_node = computation_node\n self._task_id = task_id\n self._parent_task_id = parent_task_id\n \n @property\n def id(self):\n '''\n Returns an integer as id.\n '''\n return self._task_id\n \n @property\n def parent_id(self):\n '''\n Returns the id of the parent task. Id of parent task is lower than id of current task.\n '''\n return self._parent_task_id\n \n @property\n def computation_node(self):\n '''\n Returns the current computation node\n '''\n return self._computation_node\n\n def set_messenger(self, messenger):\n '''\n Used by environment to set the messenger that this task will use to interact with other \n tasks\n '''\n self._messenger = messenger\n\n def _assert_messenger(self):\n assert self._messenger is not None, 'Task cannot run if messenger has not been set.'\n\n def _run(self):\n raise NotImplementedError('Sublcass needs to implement _run')\n\n def run(self):\n '''\n Starts the task in an infinite loop. If this method is called and the \\\n ``set_messenger()`` method has not been called yet, an assertion error \\\n will happen.\n '''\n self._assert_messenger()\n self._computation_node.open()\n self._run()\n self._computation_node.close()\n\nclass ProducerTask(Task):\n '''\n It runs forever calling the ``next()`` method in the producer node. \\\n At each iteration it checks for a termination signal, and if so it \\\n sends a termination message to its child task and breaks the infinite loop.\n '''\n def __init__(self, producer : ProducerNode, task_id : int):\n self._producer = producer\n super(ProducerTask, self).__init__(producer, task_id)\n \n def _run(self):\n while True:\n try:\n a = self._producer.next()\n self._messenger.publish_message(a)\n except StopIteration:\n break\n except KeyboardInterrupt:\n logger.info(\"Interrupt signal received. Sending signal to stop flow.\")\n break\n if self._messenger.check_for_termination():\n break\n self._messenger.publish_termination_message(STOP_SIGNAL)\n\nclass ProcessorTask(Task):\n '''\n It runs forever, first blocking until it receives a message from parent nodes through \\\n the messenger. Then it passes it to the processor node and when it gets back the output \\\n it uses the messenger to publish it down the flow. If among the inputs it received from \\\n a parent it receives a termination message, it passes termination message down the flow \\\n and breaks from infinite loop.\n '''\n def __init__(self, processor : ProcessorNode, task_id : int, parent_task_id : int):\n self._processor = processor\n \n super(ProcessorTask, self).__init__(processor, task_id, parent_task_id) \n \n @property\n def device_type(self):\n return self._processor.device_type\n \n def change_device(self, device_type : str):\n self._processor.change_device(device_type)\n \n def _run(self):\n while True:\n try:\n inputs = self._messenger.receive_message()\n stop_signal_received = any([isinstance(a, str) and a == STOP_SIGNAL for a in inputs])\n if stop_signal_received:\n self._messenger.publish_termination_message(STOP_SIGNAL)\n break\n\n #3. 
Pass inputs needed to processor\n output = self._processor.process(*inputs)\n self._messenger.publish_message(output)\n except KeyboardInterrupt:\n continue\n \nclass ConsumerTask(Task):\n '''\n It runs forever, blocking until it receives a message from parent nodes through the messenger.\n It consumes the message and does not publish anything back down the pipe.\n\n If a consumer task has tasks after it in the topological sort, it does not mean that\n those tasks expect any input from the consumer task. It simply means that the consumer\n task is a passthrough of messages. \n '''\n def __init__(self, consumer : ConsumerNode, task_id : int, parent_task_id : int,\n has_children_task : bool):\n self._consumer = consumer\n self._has_children_task = has_children_task\n super(ConsumerTask, self).__init__(consumer, task_id, parent_task_id)\n \n def _run(self):\n while True:\n try:\n inputs = self._messenger.receive_message()\n stop_signal_received = any([isinstance(a, str) and a == STOP_SIGNAL for a in inputs])\n if stop_signal_received:\n # No need to pass through stop signal to children.\n # If children need to stop, they will receive it from\n # someone else, so the message that I am passing through\n # might be the one carrying it.\n if self._has_children_task:\n self._messenger.passthrough_termination_message()\n break\n\n if self._has_children_task:\n self._messenger.passthrough_message()\n self._consumer.consume(*inputs)\n except KeyboardInterrupt:\n continue\n ","sub_path":"videoflow/core/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":6355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"28541091","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable\nfrom mpl_toolkits.axes_grid1.colorbar import colorbar\nimport numpy as np\ntex_tags = {'0': '$M_{jet}$', '1': '$C_2^{\\\\beta=1}$', '2': '$C_2^{\\\\beta=2}$',\n '3': '$D_2^{\\\\beta=1}$', '4': '$D_2^{\\\\beta=2}$', '5': '$\\\\tau_2^{\\\\beta=1}$'}\n\n\ndef grid_plotter(X, gam, predictions, file_name, mean_auc, std_auc):\n def single_feature_plot(term_num):\n XX = gam.generate_X_grid(term=ix)\n pdep, confi = gam.partial_dependence(term=ix, width=.95)\n pdep99, confi99 = gam.partial_dependence(term=ix, width=.99)\n ax.scatter(X[:, ix], np.full(len(X[:, ix]), min(confi99[:, 0]) * 1.1),\n facecolor='#333333', edgecolors='none', marker=2, rasterized=True)\n ax.fill_between(XX[:, ix], confi99[:, 0], confi99[:, 1], facecolor='#FFFF00',\n alpha=1, label='99$\\%$ confidence interv.') # inner\n ax.fill_between(XX[:, ix], confi[:, 0], confi[:, 1], facecolor='#00ff00',\n alpha=1, label='95$\\%$ confidence interv.') # outer\n ax.plot(XX[:, ix], pdep, c='k', label='partial dependence')\n ax.margins(0, 0)\n ax.set_title(tex_tags[term_num[0]])\n\n def pairwise_plot(term_num):\n XX = gam.generate_X_grid(term=ix, meshgrid=True)\n Z = gam.partial_dependence(term=ix, X=XX, meshgrid=True)\n cnf = ax.contourf(XX[0], XX[1], Z, levels=10)\n ax.contour(XX[0], XX[1], Z, levels=10, linewidths=0.5, colors='k')\n xbottom = ax.get_xlim()\n ybottom = ax.get_ylim()\n ax.set_xlabel(tex_tags[term_num[0]])\n ax.set_ylabel(tex_tags[term_num[1]])\n ax_divider = make_axes_locatable(ax)\n cax = ax_divider.append_axes(\"top\", size=\"7%\", pad=\"3%\")\n cb = colorbar(cnf, cax=cax, orientation=\"horizontal\")\n cax.xaxis.set_ticks_position(\"top\")\n terms = str(gam.terms)\n ind_terms = terms.split('+')[:-1]\n #graph_dim = 
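The STOP_SIGNAL handshake above is the classic sentinel-termination pattern; a thread-based sketch with queue.Queue (all names here are illustrative, not the videoflow API):
import queue
import threading

STOP = object()                      # unique sentinel, like STOP_SIGNAL above
q = queue.Queue()

def producer():
    for item in range(3):
        q.put(item)
    q.put(STOP)                      # tell the consumer to shut down

def consumer():
    while True:
        item = q.get()
        if item is STOP:
            break
        print('consumed', item)

t = threading.Thread(target=consumer)
t.start()
producer()
t.join()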
np.ceil(np.sqrt(len(ind_terms)))\n    cols = 3\n    rows = int(np.ceil(len(ind_terms) / cols))  # add_subplot expects integers\n    fig = plt.figure(figsize=(4 * cols, 4 * rows))\n    for ix, term in enumerate(ind_terms):\n        term_num = ''.join(c for c in term if c.isdigit())\n        ax = fig.add_subplot(rows, cols, ix + 1)\n        if len(term_num) == 1:\n            single_feature_plot(term_num)\n        else:\n            pairwise_plot(term_num)\n    plt.subplots_adjust(wspace=0.35, hspace=0.35)\n    fig.suptitle(f'AUC={mean_auc[0]:.2f} $\\\pm$ {std_auc[0]:.2f}')\n    plt.savefig(f'plots/pairwise_dep/{file_name}.pdf', dpi=1200)\n    plt.clf()\n","sub_path":"utils/grid_plotter.py","file_name":"grid_plotter.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"526871542","text":"### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###\r\n#\r\n# Author : Austin Brockner\r\n# Creation Date : 11/8/17\r\n# Details : Used to search a specific trading database for a ticker. \r\n#\r\n### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ###\r\n\r\nimport pandas as pd\r\n\r\ndef checkStock(ticker,tickerFile):\r\n\tdataFile = pd.read_csv(tickerFile)\r\n\tsymbolCol = dataFile.Symbol\r\n\t\r\n\tif symbolCol.str.contains(ticker).any():\r\n\t\tprint(ticker + \" exists in \" + tickerFile)\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False","sub_path":"stockChecker.py","file_name":"stockChecker.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"180972798","text":"from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse\nfrom .models import Order, StatusCatalog, RequiredMaterial, Storage, DillerCatalog, RequiredOperationProject, RequiredOperationManufactory,RequiredOperationContractor\nfrom .models import WorkerCatalog\nfrom .forms import RequiredOperationProjectForm, RequiredOperationManufactoryForm, RequiredOperationContractorForm\nfrom .forms import OrderForm, DillerForm\nfrom django.forms.models import inlineformset_factory\nfrom django.contrib.auth.decorators import login_required\nimport csv\n\n\n\n@login_required\ndef orders(request):\n\n    ordersArray = []\n    statusNameArray = []\n\n    statuses = StatusCatalog.objects.all().order_by(\"orderliness\")\n\n    for stat in statuses:\n        order = Order.objects.filter(status = stat.id)\n        ordersArray.append( order )\n        statusNameArray.append(stat.name)\n\n    return render(request, \"ordersAll.html\", {\"ordersArray\" : ordersArray, \"statusNameArray\" : statusNameArray })\n\n\n\n\n\n@login_required\ndef order(request, good_id):\n\n    obj = get_object_or_404(Order, id=good_id)\n    form = OrderForm(instance=obj)\n\n    RequiredMaterialFormset = inlineformset_factory(Order, RequiredMaterial, fields=('idMaterial', 'count',), can_delete=True, extra=1)\n\n\n    RequiredOperationProjectFormset = inlineformset_factory(Order, RequiredOperationProject, RequiredOperationProjectForm, can_delete=True, extra=1)\n    RequiredOperationManufactoryFormset = inlineformset_factory(Order, RequiredOperationManufactory, RequiredOperationManufactoryForm, can_delete=True, extra=1)\n    RequiredOperationContractorFormset = inlineformset_factory(Order, RequiredOperationContractor, RequiredOperationContractorForm, can_delete=True, extra=1)\n\n\n    if request.method == \"POST\":\n\n        instance = get_object_or_404(Order, id=good_id)\n        orderForm = OrderForm(request.POST, instance=instance)\n\n        formMaterials = RequiredMaterialFormset(request.POST, 
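One caveat with checkStock above: Series.str.contains does substring matching, so searching for 'A' would match 'AAPL'. A small sketch of exact-match alternatives (the toy frame is illustrative only):
import pandas as pd

symbols = pd.DataFrame({'Symbol': ['AAPL', 'MSFT', 'GOOG']})
print(symbols.Symbol.str.contains('A').any())  # True  - substring match
print((symbols.Symbol == 'A').any())           # False - exact match
print(symbols.Symbol.isin(['AAPL']).any())     # True  - exact membership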
instance=obj)\n\n formProjectOperations = RequiredOperationProjectFormset(request.POST, instance=obj)\n formManufactoryOperations = RequiredOperationManufactoryFormset(request.POST, instance=obj)\n formContractorOperations = RequiredOperationContractorFormset(request.POST, instance=obj)\n\n if orderForm.is_valid() and formMaterials.is_valid() and formProjectOperations.is_valid() and formManufactoryOperations.is_valid() and formContractorOperations.is_valid():\n\n obj = orderForm.save()\n\n instance = RequiredMaterial.objects.filter(idOrder=obj)\n instance.delete()\n\n\n\n #materials\n formMaterials.save(commit=False)\n\n for form in formMaterials:\n if form['count'].value():\n choice = form.save(commit=False)\n choice.idOrder = obj\n choice.save()\n\n\n #project operation\n instance = RequiredOperationProject.objects.filter(idOrder=obj)\n instance.delete()\n\n formProjectOperations.save(commit=False)\n\n for form in formProjectOperations.deleted_objects:\n form.delete()\n\n for form in formProjectOperations:\n if form['cost'].value() != 0 and form['cost'].value():\n choice = form.save(commit=False)\n choice.idOrder = obj\n choice.save()\n\n\n # manufactory operation\n instance = RequiredOperationManufactory.objects.filter(idOrder=obj)\n instance.delete()\n\n formManufactoryOperations.save(commit=False)\n\n for form in formManufactoryOperations.deleted_objects:\n form.delete()\n\n for form in formManufactoryOperations:\n if form['cost'].value() != 0 and form['cost'].value():\n choice = form.save(commit=False)\n choice.idOrder = obj\n choice.save()\n\n\n # constractor operation\n instance = RequiredOperationContractor.objects.filter(idOrder=obj)\n instance.delete()\n\n formContractorOperations.save(commit=False)\n\n for form in formContractorOperations.deleted_objects:\n form.delete()\n\n for form in formContractorOperations:\n if form['cost'].value() != 0 and form['cost'].value():\n choice = form.save(commit=False)\n choice.idOrder = obj\n choice.save()\n\n return orders(request)\n\n return render(request, \"order_create.html\", {\"objOrder\": instance, \"form\": form,\n \"formMaterials\": formMaterials,\n \"formProjectOperations\" : formProjectOperations,\n \"formManufactoryOperations\" : formManufactoryOperations,\n \"formContractorOperations\" : formContractorOperations\n })\n\n\n else:\n\n formMaterials = RequiredMaterialFormset(instance=obj)\n\n #formOperations = RequiredOperationFormset(instance=obj)\n\n formProjectOperations = RequiredOperationProjectFormset(instance=obj)\n formManufactoryOperations = RequiredOperationManufactoryFormset(instance=obj)\n formContractorOperations = RequiredOperationContractorFormset(instance=obj)\n\n\n\n forms = RequiredOperationProject.objects.all().filter(idOrder=good_id).filter(isDone=True)\n sumProjectOperation = 0\n for form_ in forms:\n sumProjectOperation = sumProjectOperation + form_.cost\n\n\n\n forms = RequiredOperationManufactory.objects.all().filter(idOrder=good_id).filter(isDone=True)\n sumManufactoryOperation = 0\n for form_ in forms:\n sumManufactoryOperation = sumManufactoryOperation + form_.cost\n\n\n\n\n forms = RequiredOperationContractor.objects.all().filter(idOrder=good_id).filter(isDone=True)\n sumContractorOperation = 0\n for form_ in forms:\n sumContractorOperation = sumContractorOperation + form_.cost\n\n\n # objOrder для номера заказа\n return render(request, \"order_create.html\", { \"objOrder\" : obj,\n \"form\": form,\n\n \"formMaterials\" : formMaterials,\n \"formProjectOperations\" : formProjectOperations,\n 
\"formManufactoryOperations\" : formManufactoryOperations,\n \"formContractorOperations\" : formContractorOperations,\n\n \"sumProjectOperation\": sumProjectOperation,\n \"sumManufactoryOperation\": sumManufactoryOperation,\n \"sumContractorOperation\": sumContractorOperation,\n \"sumTotal\": sumProjectOperation + sumManufactoryOperation + sumContractorOperation\n })\n\n\n\n\n@login_required\ndef order_create(request):\n\n RequiredMaterialFormset = inlineformset_factory(Order, RequiredMaterial, fields=('idMaterial', 'count',), can_delete=True, extra=1)\n\n RequiredOperationProjectFormset = inlineformset_factory(Order, RequiredOperationProject, RequiredOperationProjectForm, can_delete=True, extra=1)\n RequiredOperationManufactoryFormset = inlineformset_factory(Order, RequiredOperationManufactory, RequiredOperationManufactoryForm, can_delete=True, extra=1)\n RequiredOperationContractorFormset = inlineformset_factory(Order, RequiredOperationContractor, RequiredOperationContractorForm, can_delete=True, extra=1)\n\n if request.method == \"POST\":\n\n orderform = OrderForm(request.POST)\n\n formMaterials = RequiredMaterialFormset(request.POST)\n\n formProjectOperations = RequiredOperationProjectFormset(request.POST)\n formManufactoryOperations = RequiredOperationManufactoryFormset(request.POST)\n formContractorOperations = RequiredOperationContractorFormset(request.POST)\n\n if orderform.is_valid() and formMaterials.is_valid() and formProjectOperations.is_valid() and formManufactoryOperations.is_valid() and formContractorOperations.is_valid():\n\n orderObj = orderform.save()\n\n #materials\n formMaterials.save(commit=False)\n for form in formMaterials:\n if form['count'].value():\n choice = form.save(commit=False)\n choice.idOrder = orderObj\n choice.save()\n\n #project operation\n formProjectOperations.save(commit=False)\n for form in formProjectOperations:\n if form['cost'].value():\n choice = form.save(commit=False)\n choice.idOrder = orderObj\n choice.save()\n\n #manufactory\n formManufactoryOperations.save(commit=False)\n for form in formManufactoryOperations:\n if form['cost'].value():\n choice = form.save(commit=False)\n choice.idOrder = orderObj\n choice.save()\n\n # contractor\n formContractorOperations.save(commit=False)\n for form in formContractorOperations:\n if form['cost'].value():\n choice = form.save(commit=False)\n choice.idOrder = orderObj\n choice.save()\n\n return orders(request)\n\n\n return render(request, \"order_create.html\", {\"form\": orderform,\n \"formMaterials\": formMaterials,\n \"formProjectOperations\" : formProjectOperations,\n \"formManufactureOperations\": formManufactoryOperations,\n \"formContractorOperations\" : formContractorOperations})\n\n\n\n form = OrderForm(None)\n\n formMaterials = RequiredMaterialFormset(queryset=RequiredMaterial.objects.none())\n\n\n formProjectOperations = RequiredOperationProjectFormset(queryset=RequiredOperationProject.objects.none())\n formManufactoryOperations = RequiredOperationManufactoryFormset(queryset=RequiredOperationManufactory.objects.none())\n formContractorOperations = RequiredOperationContractorFormset(queryset=RequiredOperationContractor.objects.none())\n\n return render(request, \"order_create.html\", {\"form\": form,\n \"formMaterials\" : formMaterials,\n \"formProjectOperations\" : formProjectOperations,\n \"formManufactoryOperations\" : formManufactoryOperations,\n \"formContractorOperations\" : formContractorOperations})\n\n\n\n\n\n@login_required\ndef store(request):\n\n materialsInStore = 
Storage.objects.all()\n\n return render(request, \"store.html\", {'materialsInStore' : materialsInStore})\n\n\n@login_required\ndef createRequestMaterials(request):\n\n Definitions = []\n\n for needMaterial in RequiredMaterial.objects.all().order_by('idMaterial'):\n for havematerial in Storage.objects.all().order_by('idMaterial'):\n if needMaterial.idMaterial == havematerial.idMaterial:\n\n Definition = {}\n Definition[\"name\"] = needMaterial.idMaterial.name\n Definition[\"needMaterial\"] = needMaterial.count\n Definition[\"havematerial\"] = havematerial.count\n diff = (needMaterial.count - havematerial.count)\n # order only the shortfall, never a negative amount\n Definition[\"toOrder\"] = max(diff, 0)\n\n Definitions.append(Definition)\n\n dillers = DillerCatalog.objects.all()\n\n return render(request, \"materialsRequest.html\", { \"Definitions\" : Definitions, 'dillers' : dillers })\n\n\n################ REPORTS\n\ndef createReportCompletedApplication(request):\n\n if request.method == \"POST\":\n\n worker_id = request.POST['worker_id']\n\n worker_info = WorkerCatalog.objects.all().filter(id = worker_id)\n\n # date1 = request.POST['date1']\n\n # date2 = request.POST['date2']\n\n month = request.POST['month']\n\n report = RequiredOperationProject.objects.all().filter(idWorker=worker_id).filter(isDoneDate__month = month)\n\n total = 0\n for cost_ in report:\n total = total + cost_.cost\n\n return render(request, \"report_completed_application.html\", { \"worker_info\" : worker_info.first(),\n \"period\" : month,\n \"report\" : report, \"sum\" : total } )\n\n workers = WorkerCatalog.objects.all()\n\n # Django's __month lookup expects values 1-12, so map the month names accordingly\n months = {\"January\":1, \"February\":2, \"March\":3, \"April\":4, \"May\":5, \"June\":6, \"July\":7, \"August\":8, \"September\":9, \"October\":10, \"November\":11, \"December\":12 }\n\n years = {\n '2019',\n '2020'\n }\n\n return render(request, \"report_application_choose_worker.html\", {\"workers\" : workers, \"months\" : months, \"years\" : years} )\n\n\ndef createReportOutstandingApplication(request):\n\n if request.method == \"POST\":\n return HttpResponse(\"under development\")\n\n Operations = []\n for needOperation in RequiredOperationProject.objects.all().filter(isDone=False).order_by('idWorker'):\n Operation = {}\n Operation[\"workerName\"] = needOperation.idWorker\n Operation[\"order\"] = needOperation.idOrder\n Operation[\"operation\"] = needOperation.idOperation\n\n Operations.append(Operation)\n\n workers = WorkerCatalog.objects.all()\n\n return render(request, \"report_outstanding_applications.html\", {\"workers\" : workers, \"Operations\" : Operations})\n\n\ndef xls(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"somefilename.csv\"'\n\n writer = csv.writer(response)\n\n period = request.POST['period']\n worker_info = request.POST['worker_info']\n sum = request.POST['sum']\n\n writer.writerow([])\n writer.writerow(['ФИО',worker_info])\n writer.writerow(['месяц',period ])\n writer.writerow([])\n\n # table\n writer.writerow(['Договор','Операция', 'Дата выполнения', 'Стоимость' ])\n\n count = request.POST['count_']\n for val in range(0, int(count)):\n idOrder = request.POST['idOrder' + str(val)]\n idOperation = request.POST['idOperation'+ 
str(val)]\n isDoneDate = request.POST['isDoneDate'+ str(val)]\n cost = request.POST['cost'+ str(val)]\n writer.writerow([idOrder, idOperation, isDoneDate, cost])\n\n writer.writerow([])\n writer.writerow(['', '', 'итоговая сумма', sum])\n\n return response","sub_path":"orloveFurniture/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"599108564","text":"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport os\nimport random\nimport signal\nimport threading\nimport time\n\n\ndef start_fault_injection_thread(fault_injection_rate, interval_in_sec):\n \"\"\"\n Useful tool for validating Elastic Trainer behavior under stress.\n Can be used in production workflows to ensure correct behavior of\n various components, stateful side-effects, etc. (not just for unit tests).\n \"\"\"\n\n def _fault_injection_loop():\n while True:\n x = random.random()\n if x < fault_injection_rate:\n logging.error(\n \"Fault injection triggered! roll: {}, probability: {}\".format(\n x, fault_injection_rate\n )\n )\n os.kill(os.getpid(), signal.SIGKILL)\n\n time.sleep(interval_in_sec)\n\n fault_injection_thread = threading.Thread(target=_fault_injection_loop)\n fault_injection_thread.daemon = True\n fault_injection_thread.start()\n","sub_path":"torchelastic/fault_injection.py","file_name":"fault_injection.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"559483489","text":"class RangeModule(object):\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\" \n \n self.value_list = []\n\n def addRange(self, left, right):\n \"\"\"\n :type left: int\n :type right: int\n :rtype: None\n \"\"\"\n \n start = self.helper(left)\n end = self.helper(right + 1)\n \n if start % 2:\n start -= 1\n left = self.value_list[start]\n \n if end % 2:\n right = self.value_list[end]\n end += 1\n \n self.value_list = self.value_list[:start] + [left, right] + self.value_list[end:]\n\n def queryRange(self, left, right):\n \"\"\"\n :type left: int\n :type right: int\n :rtype: bool\n \"\"\"\n \n start = self.helper(left + 1)\n end = self.helper(right)\n \n if start == end and start % 2:\n \n return True\n\t\t\n return False\n\n def removeRange(self, left, right):\n \"\"\"\n :type left: int\n :type right: int\n :rtype: None\n \"\"\"\n \n temp_list = []\n start = self.helper(left)\n end = self.helper(right + 1)\n\n if start % 2:\n start -= 1\n temp_list.append(self.value_list[start])\n temp_list.append(left)\n \n if end % 2:\n temp_list.append(right)\n temp_list.append(self.value_list[end])\n end += 1\n\t\t\t\n self.value_list = self.value_list[:start] + temp_list + self.value_list[end:]\n \n def helper(self, target):\n low = 0\n high = len(self.value_list)\n \n while low < high:\n mid = (low + high) // 2\n \n if self.value_list[mid] < target:\n low = mid + 1\n else:\n high = mid\n \n return low \n\n\n# Your RangeModule object will be instantiated and called as such:\n# obj = RangeModule()\n# obj.addRange(left,right)\n# param_2 = obj.queryRange(left,right)\n# 
obj.removeRange(left,right)","sub_path":"practice/solution/0715_range_module.py","file_name":"0715_range_module.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"50788402","text":"import os \nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport keras\nfrom keras.preprocessing.image import load_img, smart_resize\nfrom params import *\nfrom utils import * \n\ndef create_generators(data_path=DATASET_PATH):\n 'Returns the train and validation generators'\n image_paths = []\n for folder in os.listdir(data_path):\n paths_to_add = [os.path.join(folder, path) for path in os.listdir(os.path.join(data_path, folder)) if path.endswith('jpg')]\n image_paths = image_paths + paths_to_add\n\n train_list, val_list, test_list = data_split(np.asarray(image_paths))\n\n train_data_generator = DataGeneratorClassifier(train_list, TRAINING_BATCH_SIZE, TRAINING_IMAGE_SIZE)\n validation_data_generator = DataGeneratorClassifier(val_list, VALIDATION_BATCH_SIZE, VALIDATION_IMAGE_SIZE)\n # test_data_generator = DataGeneratorClassifier(test_list, TESTING_BATCH_SIZE, TESTING_IMAGE_SIZE)\n return train_data_generator, validation_data_generator\n\n\ndef data_split(paths_list):\n 'Splits the paths list into three splits'\n split_1 = int(0.6*len(paths_list))\n split_2 = int(0.8*len(paths_list))\n np.random.shuffle(paths_list)\n return paths_list[:split_1], paths_list[split_1:split_2], paths_list[split_2:]\n\n\nclass DataGeneratorClassifier(tf.keras.utils.Sequence):\n 'Generates data for Keras'\n def __init__(self, list_IDs, batch_size, image_size, data_path=DATASET_PATH, n_channels=NUMBER_OF_CHANNELS, shuffle=SHUFFLE_DATA, nmbr_image_par_video=NMBR_OF_FRAME_PER_VIDEO):\n 'Initialization'\n self.classes = os.listdir(data_path)\n self.image_size = image_size\n self.batch_size = batch_size\n self.list_IDs = list_IDs\n self.n_channels = n_channels\n self.shuffle = shuffle\n self.on_epoch_end()\n self.data_path = data_path\n self.nmbr_image_par_video = nmbr_image_par_video\n\n def __len__(self):\n 'Denotes the number of batches per epoch'\n return int(np.floor(len(self.list_IDs) / self.batch_size))\n\n def __getitem__(self, index):\n 'Generate one batch of data'\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n list_IDs_temp = self.list_IDs[indexes]\n X, y = self.__data_generation(list_IDs_temp)\n \n return X, y\n\n def on_epoch_end(self):\n 'Updates indexes after each epoch'\n self.indexes = np.arange(len(self.list_IDs))\n if self.shuffle == True:\n np.random.shuffle(self.indexes)\n\n # TODO: revise this method\n def __data_generation(self, list_IDs_temp):\n 'Generates data containing batch_size samples' # X : (n_samples, *image_size, n_channels)\n X = np.empty((self.batch_size,self.nmbr_image_par_video, *self.image_size, self.n_channels))\n y = np.empty((self.batch_size), dtype=int)\n for i, ID in enumerate(list_IDs_temp):\n path = self.data_path+'/'+ID # re-check this path; it resolves each video inside the class folder\n Xi = load_video(cv2.VideoCapture(path),*self.image_size) # the Xi all have different lengths\n X[i,:] = Xi # Xi has a variable length but X[i,:] does not - possible dimension mismatch\n\n y[i] = self.classes.index(ID.split('/')[0])\n\n return X,keras.utils.to_categorical(y,num_classes=6)\n\ndef show_batch(generator, batch_number=0):\n images, labels = generator.__getitem__(batch_number)\n width = int(np.floor(np.sqrt(labels.shape[0])))\n height = 
int(np.ceil(labels.shape[0]/float(width)))\n total_height = int(0.09*height*images.shape[1])\n total_width = int(0.09*width*images.shape[2])\n f, axarr = plt.subplots(height,width, figsize=(total_height,total_width))\n for image in range(images.shape[0]):\n image_to_show = (images[image])/np.max(images[image])\n axarr[image//width,image%width].imshow(image_to_show)\n axarr[image//width,image%width].set_title(generator.classes[np.argmax(labels[image])])\n f.tight_layout()\n plt.show()\n\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"11547383","text":"import os\nimport json\n\nfrom cornice import Service\nfrom pyramid.httpexceptions import HTTPNotFound\n\nfrom daybed.validators import definition_validator, token_validator\n\n\ndefinition = Service(name='definition',\n path='/definitions/{model_name}',\n description='Model Definition',\n renderer=\"jsonp\",\n cors_origins=('*',))\n\n\n@definition.get()\ndef get(request):\n \"\"\"Retrieves the model definition.\"\"\"\n model_name = request.matchdict['model_name']\n definition = request.db.get_definition(model_name)\n if definition:\n return definition['definition']\n raise HTTPNotFound(detail=\"Unknown model %s\" % model_name)\n\n\n@definition.put(validators=(token_validator, definition_validator))\ndef put(request):\n \"\"\"Create or update a model definition.\n\n Checks that the data is a valid model definition.\n In the case of a modification, checks that the token is valid and present.\n\n \"\"\"\n model_name = request.matchdict['model_name']\n\n # Generate a unique token\n token = os.urandom(40).encode('hex')\n\n model_doc = {\n 'type': 'definition',\n 'name': model_name,\n 'definition': json.loads(request.body),\n 'token': token,\n }\n request.db.save(model_doc)\n return {'token': token}\n\n\n@definition.delete(validators=token_validator)\ndef delete(request):\n \"\"\"Delete a model definition together with all the data stored for it.\n\n Checks that the token is valid and present.\n\n \"\"\"\n model_name = request.matchdict['model_name']\n\n results = request.db.get_data(model_name)\n for result in results:\n request.db.db.delete(result.value)\n\n result = request.db.get_definition(model_name)\n request.db.db.delete(result)\n","sub_path":"daybed/views/definition.py","file_name":"definition.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"436337088","text":"import acm\nimport FUxCore\n\nfrom IndexSearchConsumer import ScopeTaskConsumer\nfrom IndexSearchConsumer import QueryTaskConsumer\n\nfrom IndexSearchUtils import unicode_decode\n\nimport timeit\n\nclass ResultInfo :\n def __init__(self):\n self.m_infos = []\n self.m_page = 0\n self.m_pageCount = 0\n self.m_offset = 0\n self.m_foundCount = 0\n self.m_pageLength = 0\n self.m_total = 0\n self.m_suggestion = 0\n\n\nclass IndexSearchDialog (FUxCore.LayoutDialog):\n def __init__(self, query = ''):\n self.m_openBtn = None\n self.m_searchInput = None\n self.m_suggestionLink = None\n self.m_suggestionInfo = None\n self.m_list = None\n self.m_resultInfo = None\n self.m_previousLink = None\n self.m_nextLink = None\n self.m_typed = False\n self.m_progressCtrl = None\n self.m_currentPage = 1\n self.m_query = query\n self.m_scopePaceConsumer = ScopeTaskConsumer.Create(self.IndexName(), 
self.OnScopeTaskConsumerResult, self.OnScopeTaskConsumerProgress, self.OnScopeState, self.OnScopeInitialPopulateDone)\n self.m_queryPaceConsumer = QueryTaskConsumer(self.OnQueryTaskConsumerResult)\n self.m_startTime = timeit.default_timer()\n\n def OnScopeState(self, status, statusText):\n pass\n\n def OnScopeInitialPopulateDone(self) :\n pass\n\n def OnScopeTaskConsumerResult(self, result):\n pass\n\n def OnQueryTaskConsumerResult(self, consumerResults):\n resultInfo = ResultInfo()\n resultInfo.m_foundCount = consumerResults.foundCount\n resultInfo.m_page = consumerResults.page\n resultInfo.m_pageCount = consumerResults.pageCount\n resultInfo.m_offset = consumerResults.offset\n resultInfo.m_pageLength = consumerResults.pageLength\n resultInfo.m_total = consumerResults.total\n resultInfo.m_suggestion = consumerResults.suggestion\n\n for searchResult in consumerResults.searchResults :\n di = searchResult.displayInformation\n info = {\n 'name': di.label.formatString,\n 'icon': di.icon\n }\n for keyValue in searchResult.keyValues :\n info[keyValue.key] = keyValue.value\n\n if info :\n info['moniker'] = searchResult.moniker\n resultInfo.m_infos.append(info)\n\n self.PopulateList(resultInfo, timeit.default_timer() - self.m_startTime )\n\n def OnScopeTaskConsumerProgress(self, percent, progressText):\n if self.m_progressCtrl:\n self.m_progressCtrl.SetData(percent)\n\n def HandleApply( self ):\n self.Open()\n return None\n\n def Caption(self) :\n return ''\n\n def HandleDestroy(self):\n self.m_queryPaceConsumer.Destroy()\n\n def HandleCreate( self, dlg, layout):\n self.m_fuxDlg = dlg\n self.m_fuxDlg.Caption(self.Caption())\n\n self.m_openBtn = layout.GetControl('ok')\n\n self.m_searchInput = layout.GetControl('searchInput')\n self.m_searchInput.AddCallback( 'Changed', self.OnSearchChanged, self )\n \n self.m_suggestionLink = layout.GetControl('suggestionLink')\n self.m_suggestionLink.AddCallback('Activate', self.OnSuggestionLinkClicked, None)\n self.m_suggestionLink.Visible(False)\n\n self.m_suggestionInfo = layout.GetControl('suggestionInfo')\n self.m_suggestionInfo.Visible(False)\n\n self.m_list = layout.GetControl('resultList')\n self.m_list.AddCallback('DefaultAction', self.OnListClicked, self)\n self.m_list.AddCallback('SelectionChanged', self.OnListSelChanged, self)\n self.m_list.AddCallback( 'ContextMenu', self.OnListContextMenu, self)\n\n self.m_list.ShowColumnHeaders(True)\n self.PopulateColumns()\n self.m_list.EnableHeaderSorting(True)\n\n #self.m_fuxDlg.RegisterTimer( self.OnTimer, 100) \n\n self.m_progressCtrl = layout.GetControl('indexProgress')\n\n self.m_resultInfo = layout.GetControl('resultInfo')\n #self.m_resultInfo.Editable(False)\n\n self.m_previousLink = layout.GetControl('previousLink')\n self.m_nextLink = layout.GetControl('nextLink')\n\n self.m_previousLink.SetData('Previous')\n self.m_nextLink.SetData('Next')\n\n self.m_previousLink.Enabled(False)\n self.m_nextLink.Enabled(False)\n\n self.m_previousLink.AddCallback('Activate', self.OnPreviousClicked, None)\n self.m_nextLink.AddCallback('Activate', self.OnNextClicked, None)\n\n self.m_searchInput.SetData(self.m_query)\n\n self.UpdateControls()\n\n if self.m_query :\n self.m_typed = True\n\n\n def PopulateColumns(self):\n pass\n\n def UpdateControls(self) :\n self.m_openBtn.Enabled(self.m_list.GetSelectedItem() != None)\n\n def OnSuggestionLinkClicked(self, cd, ud) :\n query = self.m_suggestionLink.GetData()\n self.m_suggestionLink.SetData('')\n self.m_suggestionLink.Visible(False)\n self.m_suggestionInfo.Visible(False)\n\n 
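# Editor's note - a sketch of the paging flow as implemented in this file (all names\n # are the ones defined here, nothing external is assumed): OnNextClicked and\n # OnPreviousClicked below re-issue the search with m_currentPage +/- 1; DoSearch(page)\n # hands (query, page, 20) to m_queryPaceConsumer, whose results come back through\n # OnQueryTaskConsumerResult and feed UpdateResultInfo, which only enables the\n # Previous/Next links while 1 <= page <= pageCount. For example:\n #\n # dlg.DoSearch(1) # first 20 hits\n # dlg.DoSearch(2) # hits 21-40, provided pageCount >= 2\n\n 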
def OnPreviousClicked(self, cd, ud) :\n self.DoSearch(self.m_currentPage - 1)\n\n def OnNextClicked(self, cd, ud) :\n self.DoSearch(self.m_currentPage + 1)\n\n def OnTimer(self, ud):\n if self.m_typed :\n elapsed = timeit.default_timer() - self.m_startTypedTime\n\n if elapsed > 0.25 :\n self.DoSearch(1)\n self.m_typed = False\n\n def IndexName(self) :\n return u''\n\n def DoSearch(self, page) :\n query = unicode_decode(self.m_searchInput.GetData())\n self.m_currentPage = page\n self.m_startTime = timeit.default_timer()\n\n self.m_queryPaceConsumer.DoSearch(query, page, 20, self.m_scopePaceConsumer.PaceConsumer(), self.m_scopePaceConsumer.Scope())\n \n def OnSearchChanged(self, ud, cd) :\n #if not self.m_typed :\n # self.m_startTypedTime = timeit.default_timer()\n\n #self.m_typed = True\n self.DoSearch(1)\n\n def Open(self):\n pass \n\n def OnListClicked(self, ud, cd):\n self.Open() \n\n def OnListSelChanged(self, ud, cd):\n self.UpdateControls()\n\n def BuildContextMenu(self, menuBuilder):\n pass\n\n def OnListContextMenu(self, ud, cd):\n self.BuildContextMenu(cd.At('menuBuilder'))\n def AddListItem(self, root, info, select) :\n pass\n\n def UpdateResultInfo(self, resultInfo, elapsed_time) :\n found = resultInfo.m_foundCount\n page = resultInfo.m_page\n start = resultInfo.m_offset\n end = start + resultInfo.m_pageLength\n pageCount = resultInfo.m_pageCount\n total = resultInfo.m_total\n\n if total == 0:\n start = 0\n end = 0\n elif start != 0 :\n end -= 1\n else:\n start += 1\n \n \n s = 'Page %d of %d (%d-%d of %d) in %0.5f seconds' % (page, pageCount, start, end, total, elapsed_time)\n self.m_resultInfo.SetData(s)\n \n self.m_nextLink.Enabled(resultInfo.m_page < resultInfo.m_pageCount)\n self.m_previousLink.Enabled(resultInfo.m_page > 1)\n\n def UpdateSuggestionIfNeeded(self, resultInfo) :\n self.m_suggestionLink.SetData(str(resultInfo.m_suggestion))\n self.m_suggestionLink.Visible(True if resultInfo.m_suggestion else False)\n self.m_suggestionInfo.Visible(True if resultInfo.m_suggestion else False)\n\n def PopulateList(self, resultInfo, elapsed_time):\n self.m_list.RemoveAllItems()\n root = self.m_list.GetRootItem()\n self.UpdateSuggestionIfNeeded(resultInfo)\n self.UpdateResultInfo(resultInfo, elapsed_time)\n first = True\n for info in resultInfo.m_infos :\n if info :\n self.AddListItem(root, info, first)\n first = False\n\n self.UpdateControls()\n\n\n def CreateLayout(self):\n b = acm.FUxLayoutBuilder()\n b.BeginVertBox()\n b. BeginHorzBox()\n b. AddInput('searchInput', '')\n b. EndBox()\n b. BeginVertBox()\n b. AddLabel('suggestionInfo', 'Did you mean:')\n b. AddHyperLink('suggestionLink')\n b. AddList('resultList', 20, -1, 177)\n b. EndBox()\n b. BeginHorzBox()\n b. AddHyperLink('previousLink', 40, 40)\n b. AddFill()\n b. AddLabel('resultInfo', ' ')\n b. AddFill()\n b. AddHyperLink('nextLink', 25, 25)\n b. EndBox()\n b. AddProgress('indexProgress', 1, 10, -1, 10)\n b. BeginHorzBox()\n b. AddFill()\n b. AddButton('ok', 'Open')\n b. AddButton('cancel', 'Close')\n b. 
EndBox()\n b.EndBox()\n return b\n","sub_path":"Extensions/IndexSearch/FPythonCode/IndexSearchDialog.py","file_name":"IndexSearchDialog.py","file_ext":"py","file_size_in_byte":8852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"168825301","text":"# -*- coding: UTF-8 -*-\n'''\n@author: xuqiang\n'''\nimport tensorflow as tf\n\nq = tf.FIFOQueue(capacity=5, dtypes=tf.int32)\n# the initialization enqueue op must be run explicitly before the queue is used\ninit = q.enqueue_many(([0, 10, 6, 5, 9],))\nx = q.dequeue()\ny = x + 1\nq_in = q.enqueue(vals=[y])\nwith tf.Session() as sess:\n sess.run(init)\n # init.run()\n for _ in range(10):\n v, _ = sess.run([x, q_in])\n print(v)\n","sub_path":"src/demo/section7-TFRecord/queue-and-thread/FIFOqueue.py","file_name":"FIFOqueue.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"456333053","text":"def maximumSubarray(A, low, high):\n '''To find a maximum subarray.\n A: The array.\n low: Starting index.\n high: Ending index.'''\n\n if low == high:\n return (low, high, A[low])\n else:\n mid = (low + high) // 2\n low_l, high_l, max_l = maximumSubarray(A, low, mid)\n low_r, high_r, max_r = maximumSubarray(A, mid+1, high)\n low_cross, high_cross, max_cross = maximumSubarrayCross(A, low, mid, high)\n\n if max_l >= max_r and max_l >= max_cross:\n return (low_l, high_l, max_l)\n elif max_r >= max_l and max_r >= max_cross:\n return (low_r, high_r, max_r)\n else:\n return (low_cross, high_cross, max_cross)\n\n\ndef maximumSubarrayCross(A, low, mid, high):\n '''A subroutine of maximumSubarray. To find maximum cross subarray.'''\n\n sum_left = A[mid]\n current_sum = A[mid]\n max_left = mid\n for i in range(mid-1, low-1, -1):\n current_sum += A[i]\n if current_sum > sum_left:\n sum_left = current_sum\n max_left = i\n\n sum_right = A[mid+1]\n current_sum = A[mid+1]\n max_right = mid+1\n for i in range(mid+2, high+1):\n current_sum += A[i]\n if current_sum > sum_right:\n sum_right = current_sum\n max_right = i\n\n return (max_left, max_right, sum_left + sum_right)\n\n\nif __name__ == '__main__':\n arr1 = [13,-3,-25,20,-3,-16,-23,18,20,-7,12,-5,-22,15,-4,7]\n arr2 = [1, -4, 3, -4]\n print(maximumSubarray(arr1, 0, len(arr1)-1))\n print(maximumSubarray(arr2, 0, len(arr2)-1))\n","sub_path":"ch4/4.1/maximum_subarray_recur.py","file_name":"maximum_subarray_recur.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"546996434","text":"import requests\nimport json\n\nfrom typing import Union, Tuple, List, Any, Dict\n\nfrom rest_framework.views import Request\n\nfrom .BaseRequester import BaseRequester\n\nclass PublisherRequester(BaseRequester):\n TOKENS = {\n\n }\n\n URL = ''\n\n def __init__(self):\n self.URL = self.URLS['PUBLISHER']\n\n def __user_exists(self, request: Request, u_id: int) -> bool:\n from api.requesters.UserRequester import UserRequester\n\n _, code = UserRequester().user(request = request, id_ = u_id)\n\n return code == 200\n\n def __user_by_token(self, request: Request) -> Tuple[Dict[str, str], int]:\n from api.requesters.UserRequester import UserRequester\n\n u_json, code = UserRequester().info(request = request)\n\n return u_json, code\n\n def publishers(self, request: Request) -> Tuple[Dict[str, str], int]:\n url = self.URL\n\n u_json, code = self.__user_by_token(request)\n\n if code != 200:\n return (u_json, code)\n\n url += 
f'?user_id={u_json[\"id\"]}'\n\n limitoffset = self._limit_offset_from_request(request)\n\n if limitoffset:\n url += f'&limit={limitoffset[0]}&offset={limitoffset[1]}'\n\n response = self.get(\n url = url,\n )\n\n response_json, code = self._process_response(\n response = response,\n task_name = 'PUBLISHERS'\n )\n\n response_json = self._safe_next_previous_link(\n response = response\n )\n\n return (response_json, response.status_code)\n\n def publisher(self, request: Request, uuid: str) -> Tuple[Dict[str, str], int]:\n response = self.get(\n url = self.URL + f'{uuid}/'\n )\n\n return self._process_response(\n response = response,\n task_name = 'PUBLISHER'\n )\n\n def post_publisher(self, request: Request, data: dict) -> Tuple[dict, int]:\n # check request and data\n\n response = self.post(\n url = self.URL,\n data = data\n )\n\n return self._process_response(\n response = response,\n task_name = 'POST_PUBLISHER'\n )\n\n def patch_publisher(self, request: Request, data: dict, uuid: str) -> Tuple[Dict[str, str], int]:\n # check request and data\n\n response = self.patch(\n url = self.URL + f'{uuid}/',\n data = data\n )\n\n return self._process_response(\n response = response,\n task_name = 'PATCH_PUBLISHER'\n )\n\n def delete_publisher(self, request: Request, data: dict, uuid: str) -> Tuple[Dict[str, str], int]:\n publisher_json, code = self.publisher(request, uuid)\n\n if code != 200:\n return publisher_json, code\n\n response = self.delete(\n url = self.URL + f'{uuid}/',\n data = data\n )\n\n return self._process_response(\n response = response,\n task_name = 'DELETE_PUBLISHER'\n )\n","sub_path":"gateway/api/requesters/PublisherRequester.py","file_name":"PublisherRequester.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"625044440","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n fslib.setup\n ~~~~~~~~~~~\n\n setuptools script\n\n This file is part of fs_filepicker.\n\n :copyright: Copyright 2017-2018 Reimar Bauer\n :license: APACHE-2.0, see LICENSE for details.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\n# The README.txt file should be written in reST so that PyPI can use\n# it to generate your project's PyPI page.\nfrom fslib.version import __version__\nfrom setuptools import setup, find_packages\n\nlong_description = open(\"README.rst\").read()\n\nsetup(\n name=\"fs_filepicker\",\n version=__version__,\n description=\"QT Filepicker for pyfilesystem2\",\n long_description=long_description,\n classifiers=[\"Development Status :: 5 - Production/Stable\"],\n keywords=\"fs\",\n maintainer=\"Reimar Bauer\",\n maintainer_email=\"rb.proj@gmail.com\",\n author=\"Reimar Bauer\",\n author_email=\"rb.proj@gmail.com\",\n license=\"Apache 2.0\",\n url=\"https://github.com/ReimarBauer/fs_filepicker\",\n platforms=\"any\",\n packages=find_packages(),\n namespace_packages=[],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"fs==2.4.16\",\n \"future==0.18.3\",\n 
\"humanfriendly==10.0\",\n ], # we use conda build recipe\n entry_points={\n \"console_scripts\": [\n \"fs_filepicker=fslib.fs_filepicker:main\",\n ]\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"463392711","text":"import pygame\nimport random\nimport ctypes\n\nappname = \"Minhocada\"\n\noptions_file = \"options.txt\"\ncolours_file = \"colours.txt\"\nscores_file= \"scores.txt\"\n\nfont = \"candara\"\n\n# Cores\nbranco = (255, 255, 255)\npreto = (0, 0, 0)\nazul = (0, 0, 255)\nvermelho = (255, 0, 0)\namarelo = (255, 255, 0)\nverde = (0, 255, 0)\ncastanho = (139,69,19)\naqua = (0, 255, 255)\nbege = (251, 196, 159)\nlaranja = (231, 118, 5)\nrosa = (244, 0, 161)\nroxo = (127, 2, 164)\nazul_mesclado = (127, 0, 255)\nverde_amarelado = (216, 223, 104)\ndourado = (218, 165, 32)\nperola = (245, 245, 220)\nprateado = (192, 192, 192)\nrosa_claro = (255, 164, 214)\nverde_escuro = (2, 54, 5)\nverde_tropa = (77, 88, 48)\nverde_estranho = (7, 92, 80)\ncastanho_claro = (165, 128, 100)\nlilas = (156, 124, 222)\nferrugem = (183, 65, 14)\nazul_celeste = (50, 173, 234)\nazul_claro = (175, 228, 255)\nmarinho_escuro = (29, 0, 89)\nmarinho = (18, 10, 143)\ncinzento = (97, 97, 97)\nrosa_mesclado = (255, 0, 127)\npurpura = (72, 0, 96)\nrosa_escuro = (199, 21, 133)\n\n\nColours = {0 : aqua, 1 : azul, 2 : vermelho, 3 : amarelo, 4 : verde, 5 : castanho, 6 : perola, 7 : bege, 8 : castanho_claro, 9 : ferrugem, 10 : laranja, 11 : dourado, 12 : verde_amarelado, 13 : verde_estranho, 14 : verde_tropa, 15 : verde_escuro, 16 : azul_claro, 17 : azul_celeste, 18 : azul_mesclado, 19 : marinho, 20 : marinho_escuro, 21 : lilas, 22 : roxo, 23 : purpura, 24 : rosa_claro, 25 : rosa, 26 : rosa_mesclado, 27 : rosa_escuro, 28 : prateado, 29 : cinzento}\n\n\npygame.init()\ndisplayInfo = pygame.display.Info()\n\n#Start app\nl = 10\n\ntitle = \"MINHOCADA\"\ntitle_colours = (vermelho, laranja, amarelo, verde, aqua, marinho, roxo, rosa_mesclado, perola)\nif len(title) != len(title_colours):\n title_colours = Colours\n\nctypes.windll.user32.SetProcessDPIAware()\nscreen_x = int((ctypes.windll.user32.GetSystemMetrics(0) // l) * l)\nscreen_y = int((ctypes.windll.user32.GetSystemMetrics(1) // l) * l)\nGame_Display = pygame.display.set_mode((screen_x,screen_y), pygame.FULLSCREEN)\npygame.display.set_caption(appname)\n\ntimer = pygame.time.Clock()\nmenu_fps = 50\n\n# Colocar texto no ecrã\ndef put_text(display, color, x, y, text):\n# x -> 1 a 7 / y -> 1 a 8\n if y == 2 or color == preto:\n bold = True\n else:\n bold = False\n size = min(screen_x, screen_y) // 10\n part_x = screen_x // 7\n part_y = screen_y // 8\n tipo_de_letra = pygame.font.SysFont(font, size, bold)\n letras = tipo_de_letra.render(text, True, color)\n texto = letras.get_rect()\n texto.center = (int((x - 0.5) * part_x), int((y - 0.5) * part_y))\n display.blit(letras, texto)\n pygame.display.update()\n return\n\n\n# Colocar título no ecrã\ndef put_title_text(display, color, y, text):\n# x -> 1 a len(text) * 2 // y -> 1 a 8\n bold = True\n size = min(screen_x, screen_y) // 7\n part_x = screen_x // (len(text) * 2)\n part_y = screen_y // 8\n tipo_de_letra = pygame.font.SysFont(font, size, bold)\n for x in range(1 , len(text) + 1):\n index = x - 1\n x += len(text) / 2\n char = text[index]\n letra = tipo_de_letra.render(char, True, color[index])\n texto = letra.get_rect()\n texto.center = (int((x - 0.5) * part_x), int((y - 0.5) * 
part_y))\n display.blit(letra, texto)\n pygame.display.update()\n return\n\n\n# Positive modulo helper (used to wrap menu selection indices)\ndef a_mod_b(a, b):\n c = a % b\n if c < 0:\n c += b\n return c\n\n\n# Make sure a file exists, creating it with default contents if it is missing\ndef check_file(file_name, default_text):\n try:\n File = open(file_name, \"r\")\n except FileNotFoundError:\n File = open(file_name, \"x\")\n File.close()\n File = open(file_name, \"w\")\n File.write(default_text)\n finally:\n File.close()\n return\n\n\n# Make sure the options file, colours file and scores file all exist\ndef check_files():\n\n# Options file\n check_file(options_file, \"Number of Players: 5 \\nNumber of Cookies: 1 \\nLimits: 0 \\nCollision: 0 \\nDifficulty: 5\")\n \n# Colours file\n check_file(colours_file, \"Player 1: 1\\nPlayer 2: 2\\nPlayer 3: 3\\nPlayer 4: 4\\nPlayer 5: 5\")\n \n# Scores file\n check_file(scores_file, \"0 0 0 0 0 0 0 0\")\n \n return\n\n\n# Update the scores file (insert the new score, keeping only the top 8)\ndef update_scores(score):\n FileScores = open(scores_file, \"r\")\n scoreboard = FileScores.read()\n FileScores.close()\n scores = [int(x) for x in scoreboard.split()]\n scores.append(score)\n scores = sorted(scores, reverse = True)\n scores.pop()\n scores = [str(x) for x in scores]\n writing = \" \".join(scores)\n if writing != scoreboard:\n FileScores = open(scores_file, \"w\")\n FileScores.write(writing)\n FileScores.close()\n return\n\n\n# Main program loop\ndef loop():\n exitall = False\n check_files()\n while not exitall:\n leave_main_menu = main_menu()\n if type(leave_main_menu) == bool:\n exitall = True\n elif leave_main_menu == 0:\n gameover_menu()\n elif leave_main_menu == 1:\n options_loop()\n elif leave_main_menu == 2:\n score_menu()\n elif leave_main_menu == 3:\n exitall = exit_menu()\n\n\n# Menu 1\ndef main_menu():\n menu_to_open = 0\n exitmenu = False\n Game_Display.fill(preto)\n put_title_text(Game_Display, title_colours, 2, title)\n pygame.display.update()\n while not exitmenu:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n exitmenu = True\n break\n if event.key == pygame.K_UP:\n menu_to_open -= 1\n if event.key == pygame.K_DOWN:\n menu_to_open += 1\n if event.key == pygame.K_ESCAPE:\n return False\n menu_to_open = a_mod_b(menu_to_open, 4)\n if menu_to_open == 0:\n put_text(Game_Display, aqua, 4, 4, \"PLAY\")\n put_text(Game_Display, branco, 4, 5, \"OPTIONS\")\n put_text(Game_Display, branco, 4, 6, \"SCOREBOARD\")\n put_text(Game_Display, branco, 4, 7, \"EXIT\")\n elif menu_to_open == 1:\n put_text(Game_Display, branco, 4, 4, \"PLAY\")\n put_text(Game_Display, aqua, 4, 5, \"OPTIONS\")\n put_text(Game_Display, branco, 4, 6, \"SCOREBOARD\")\n put_text(Game_Display, branco, 4, 7, \"EXIT\")\n elif menu_to_open == 2:\n put_text(Game_Display, branco, 4, 4, \"PLAY\")\n put_text(Game_Display, branco, 4, 5, \"OPTIONS\")\n put_text(Game_Display, aqua, 4, 6, \"SCOREBOARD\")\n put_text(Game_Display, branco, 4, 7, \"EXIT\")\n elif menu_to_open == 3:\n put_text(Game_Display, branco, 4, 4, \"PLAY\")\n put_text(Game_Display, branco, 4, 5, \"OPTIONS\")\n put_text(Game_Display, branco, 4, 6, \"SCOREBOARD\")\n put_text(Game_Display, aqua, 4, 7, \"EXIT\")\n pygame.display.update()\n timer.tick(menu_fps)\n return menu_to_open\n\n\n# Menu 2\ndef exit_menu():\n menu_to_open = 0\n exitmenu = False\n Game_Display.fill(preto)\n pygame.display.update()\n while not exitmenu:\n for event in pygame.event.get():\n put_text(Game_Display, branco, 4, 2, \"EXIT\")\n if event.type == pygame.KEYDOWN:\n 
if event.key == pygame.K_RETURN:\n exitmenu = True\n break\n if event.key == pygame.K_RIGHT:\n menu_to_open += 1\n if event.key == pygame.K_LEFT:\n menu_to_open -= 1\n menu_to_open = a_mod_b(menu_to_open, 2)\n if menu_to_open == 0:\n put_text(Game_Display, aqua , 2, 6, \"NO\")\n put_text(Game_Display, branco, 6, 6, \"YES\")\n else:\n put_text(Game_Display, branco, 2, 6, \"NO\")\n put_text(Game_Display, aqua, 6, 6, \"YES\")\n pygame.display.update()\n timer.tick(menu_fps)\n if menu_to_open == 0:\n exitapp = False\n else:\n exitapp = True\n return exitapp\n\n\n# Menu 5\ndef gameover_menu():\n score = game_menu()\n exitmenu = False\n Game_Display.fill(preto)\n put_text(Game_Display, branco, 4, 2, \"GAMEOVER\")\n put_text(Game_Display, branco, 3, 4, \"SCORE\")\n put_text(Game_Display, branco, 6, 4, str(score))\n put_text(Game_Display, aqua, 4, 7, \"MAIN MENU\")\n pygame.display.update()\n while not exitmenu:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n exitmenu = True\n break\n timer.tick(menu_fps)\n update_scores(score)\n return\n\n\n# Menu 7\ndef score_menu():\n exitmenu = False\n Game_Display.fill(preto)\n FileScores = open(scores_file, \"r\")\n scoreboard = FileScores.read()\n FileScores.close()\n scores = scoreboard.split()\n \n put_text(Game_Display, branco, 4, 2, \"SCOREBOARD\")\n put_text(Game_Display, branco, 2, 4, scores[0])\n put_text(Game_Display, branco, 2, 5, scores[1])\n put_text(Game_Display, branco, 2, 6, scores[2])\n put_text(Game_Display, branco, 2, 7, scores[3])\n put_text(Game_Display, branco, 6, 4, scores[4])\n put_text(Game_Display, branco, 6, 5, scores[5])\n put_text(Game_Display, branco, 6, 6, scores[6])\n put_text(Game_Display, branco, 6, 7, scores[7])\n put_text(Game_Display, aqua, 4, 8, \"BACK\")\n pygame.display.update()\n \n while not exitmenu:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n exitmenu = True\n break\n timer.tick(menu_fps)\n return\n\n\n# Menu 4\ndef pause_menu():\n menu_to_open = 0\n exitmenu = False\n Game_Display.fill(preto)\n put_text(Game_Display, branco, 4, 2, \"PAUSE\")\n put_text(Game_Display, aqua , 4, 5, \"UNPAUSE\")\n put_text(Game_Display, branco, 4, 7, \"MAIN MENU\")\n pygame.display.update()\n while not exitmenu:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n exitmenu = True\n if event.key == pygame.K_UP:\n menu_to_open -= 1\n if event.key == pygame.K_DOWN:\n menu_to_open += 1\n menu_to_open = a_mod_b(menu_to_open, 2)\n if menu_to_open == 0:\n put_text(Game_Display, aqua , 4, 5, \"UNPAUSE\")\n put_text(Game_Display, branco, 4, 7, \"MAIN MENU\")\n else:\n put_text(Game_Display, branco, 4, 5, \"UNPAUSE\")\n put_text(Game_Display, aqua, 4, 7, \"MAIN MENU\")\n pygame.display.update()\n timer.tick(menu_fps)\n if menu_to_open == 0:\n exitgame = False\n else:\n exitgame = True\n return exitgame\n\n\n# Menu 3\ndef game_menu():\n \n # Propriedades dos jogadores\n class Player:\n def __init__ (self, numero, posx, posy, cor):\n \n self.n = numero\n self.x = posx\n self.y = posy\n \n self.direcao = \"stop\"\n \n self.cor = Colours[cor]\n \n \n # Ficheiro com as opções de jogo\n FileOptions = open(options_file, \"r\")\n opcoes = FileOptions.readlines()\n FileOptions.close()\n \n # Ficheiro com as cores dos jogadores\n FileColours = open(colours_file, \"r\")\n cores = FileColours.readlines()\n FileColours.close()\n \n # Start game\n numplayers = 
int(opcoes[0].split()[-1])\n numcookies = int(opcoes[1].split()[-1])\n borders = int(opcoes[2].split()[-1])\n choques = int(opcoes[3].split()[-1])\n difficulty = int(opcoes[4].split()[-1])\n \n cor1 = int(cores[0].split()[-1])\n cor2 = int(cores[1].split()[-1])\n cor3 = int(cores[2].split()[-1])\n cor4 = int(cores[3].split()[-1])\n cor5 = int(cores[4].split()[-1])\n \n options = {\"Number of Players\" : numplayers, \"Number of Cookies\" : numcookies, \"Difficulty\" : difficulty + 1, \"Limitless\" : 1 - borders, \"Collisions\" : choques}\n \n Snakes = {}\n # Criar as cobrinhas de cada jogador (grupo de posições ocupadas pela cauda de cada cobrinha)\n if options[\"Number of Players\"] >= 1:\n x1 = int(((screen_x * (1 / 4)) // l) * l)\n y1 = int(((screen_y * (1 / 4)) // l) * l)\n p1 = Player (1, x1, y1, cor1)\n Snakes [1] = []\n \n if options[\"Number of Players\"] >= 2:\n x2 = int(((screen_x * (3 / 4)) // l) * l)\n y2 = int(((screen_y * (1 / 4)) // l) * l)\n p2 = Player (2, x2, y2, cor2)\n Snakes [2] = []\n \n if options[\"Number of Players\"] >= 3:\n x3 = int(((screen_x * (1 / 4)) // l) * l)\n y3 = int(((screen_y * (3 / 4)) // l) * l)\n p3 = Player (3, x3, y3, cor3)\n Snakes [3] = []\n \n if options[\"Number of Players\"] >= 4:\n x4 = int(((screen_x * (3 / 4)) // l) * l)\n y4 = int(((screen_y * (3 / 4)) // l) * l)\n p4 = Player (4, x4, y4, cor4)\n Snakes [4] = []\n \n if options[\"Number of Players\"] >= 5:\n x5 = int(((screen_x * (2 / 4)) // l) * l)\n y5 = int(((screen_y * (2 / 4)) // l) * l)\n p5 = Player (5, x5, y5, cor5)\n Snakes [5] = []\n \n\n # Definir os Controls para cada jogador\n def Controls(key):\n direcao = (0, \"stop\")\n \n # Player 1 : setas\n if key == pygame.K_UP:\n direcao = (1,\"cima\")\n elif key == pygame.K_DOWN:\n direcao = (1,\"baixo\")\n elif key == pygame.K_RIGHT:\n direcao = (1,\"direita\")\n elif key == pygame.K_LEFT:\n direcao = (1,\"esquerda\")\n \n # Player 2 : WASD\n if key == pygame.K_w:\n direcao = (2,\"cima\")\n elif key == pygame.K_s:\n direcao = (2,\"baixo\")\n elif key == pygame.K_d:\n direcao = (2,\"direita\")\n elif key == pygame.K_a:\n direcao = (2,\"esquerda\")\n \n # Player 3 : TFGH\n if key == pygame.K_t:\n direcao = (3,\"cima\")\n elif key == pygame.K_g:\n direcao = (3,\"baixo\")\n elif key == pygame.K_h:\n direcao = (3,\"direita\")\n elif key == pygame.K_f:\n direcao = (3,\"esquerda\")\n \n # Player 4 : \"IJKL\"\n if key == pygame.K_i:\n direcao = (4,\"cima\")\n elif key == pygame.K_k:\n direcao = (4,\"baixo\")\n elif key == pygame.K_l:\n direcao = (4,\"direita\")\n elif key == pygame.K_j:\n direcao = (4,\"esquerda\")\n \n # Player 5 : \"8456\"\n if key == pygame.K_KP8:\n direcao = (5,\"cima\")\n elif key == pygame.K_KP5:\n direcao = (5,\"baixo\")\n elif key == pygame.K_KP6:\n direcao = (5,\"direita\")\n elif key == pygame.K_KP4:\n direcao = (5,\"esquerda\")\n \n return direcao\n \n \n # Criar a bolacha\n def create_cookie():\n posCx = random.randrange(int(screen_x // l)) * l\n posCy = random.randrange(int(screen_y // l)) * l\n return (posCx, posCy)\n \n \n # Verificar se saiu fora dos limites (em Limitless == 1) e trazer o jogador de volta para dentro\n def go_out_of_bounds (posx, posy):\n if posx < 0:\n posx = posx + screen_x\n elif posx >= screen_x:\n posx = posx - screen_x\n if posy < 0:\n posy = posy + screen_y\n elif posy >= screen_y:\n posy = posy - screen_y\n return (posx, posy)\n \n\n # Verificar se saiu fora dos limites (em Limitless == 0) e parar o jogo caso tenha saído\n def crash (posx, posy):\n if posx < 0 or posx >= screen_x or 
posy < 0 or posy >= screen_y:\n exitgame = True\n else:\n exitgame = False\n return exitgame\n \n\n # Verificar se algum jogador comeu a bolacha, devolvendo o número do jogador\n def eating_cookies(posCx, posCy):\n if options[\"Number of Players\"] >= 1 and p1.x == posCx and p1.y == posCy:\n return 1\n elif options[\"Number of Players\"] >= 2 and p2.x == posCx and p2.y == posCy:\n return 2\n elif options[\"Number of Players\"] >= 3 and p3.x == posCx and p3.y == posCy:\n return 3\n elif options[\"Number of Players\"] >= 4 and p4.x == posCx and p4.y == posCy:\n return 4\n elif options[\"Number of Players\"] >= 5 and p5.x == posCx and p5.y == posCy:\n return 5\n else:\n return 0\n \n \n # Atualizar as posições dos jogadores\n def update (p):\n if p.direcao == \"cima\":\n p.y -= 10\n elif p.direcao == \"baixo\":\n p.y += 10\n elif p.direcao == \"direita\":\n p.x += 10\n elif p.direcao == \"esquerda\":\n p.x -= 10\n return(p.x, p.y)\n \n \n # Verificar se a cobrinha bateu na própria cauda\n def check_collisions(Snake_path):\n colisao = False\n for index in range(len(Snake_path)):\n if index != (len(Snake_path) - 1):\n for index2 in range(len(Snake_path[index + 1:])):\n if Snake_path[index] == Snake_path[index + 1:][index2]:\n colisao = True\n return colisao\n \n \n # Verificar se a cobrinha bateu noutra cobrinha\n def check_collisions_inter_snakes (Snake_path_a, Snake_path_b):\n colisao = False\n for position_a in Snake_path_a:\n for position_b in Snake_path_b:\n if position_a == position_b:\n colisao = True\n return colisao\n \n\n # Loop do jogo\n def maingame():\n \n # Criar a bolacha\n if options[\"Number of Cookies\"] >= 1: \n (posC1x, posC1y) = create_cookie()\n if options[\"Number of Cookies\"] >= 2:\n (posC2x, posC2y) = create_cookie()\n # Verificar se não gerou a cookie num local onde já tinha outra cookie\n while (posC2x, posC2y) == (posC1x, posC1y):\n (posC2x, posC2y) = create_cookie()\n if options[\"Number of Cookies\"] >= 3:\n (posC3x, posC3y) = create_cookie()\n # Verificar se não gerou a cookie num local onde já tinha outra cookie\n while (posC3x, posC3y) == (posC1x, posC1y) or (posC3x, posC3y) == (posC2x, posC2y):\n (posC3x, posC3y) = create_cookie()\n if options[\"Number of Cookies\"] >= 4:\n (posC4x, posC4y) = create_cookie()\n # Verificar se não gerou a cookie num local onde já tinha outra cookie\n while ((posC4x, posC4y) == (posC1x, posC1y)) or ((posC4x, posC4y) == (posC2x, posC2y)) or ((posC4x, posC4y) == (posC3x, posC3y)):\n (posC4x, posC4y) = create_cookie()\n if options[\"Number of Cookies\"] >= 5:\n (posC5x, posC5y) = create_cookie()\n # Verificar se não gerou a cookie num local onde já tinha outra cookie\n while ((posC5x, posC5y) == (posC1x, posC1y)) or ((posC5x, posC5y) == (posC2x, posC2y)) or ((posC5x, posC5y) == (posC3x, posC3y)) or ((posC5x, posC5y) == (posC4x, posC4y)):\n (posC5x, posC5y) = create_cookie()\n cookie_color = branco\n score = 0\n \n exitgame = False\n \n while not exitgame:\n \n # Limpar o display e colocar a cookie\n Game_Display.fill(preto)\n if options[\"Number of Cookies\"] >= 1: \n pygame.draw.rect(Game_Display, cookie_color , (posC1x, posC1y, l, l))\n if options[\"Number of Cookies\"] >= 2:\n pygame.draw.rect(Game_Display, cookie_color , (posC2x, posC2y, l, l))\n if options[\"Number of Cookies\"] >= 3:\n pygame.draw.rect(Game_Display, cookie_color , (posC3x, posC3y, l, l))\n if options[\"Number of Cookies\"] >= 4:\n pygame.draw.rect(Game_Display, cookie_color , (posC4x, posC4y, l, l))\n if options[\"Number of Cookies\"] >= 5:\n 
pygame.draw.rect(Game_Display, cookie_color , (posC5x, posC5y, l, l))\n \n \n # Dependendo do número de jogadores:\n # Verificar o jogador saiu dos limites\n # Colocar a posição atual na lista de posições daquele jogador\n # Desenhar todas as posições daquela cobrinha minhoca no ecrã\n \n if options[\"Number of Players\"] >= 1:\n if options[\"Limitless\"] == 1:\n (p1.x, p1.y) = go_out_of_bounds(p1.x, p1.y)\n else:\n exitgame = exitgame or crash (p1.x, p1.y)\n Snakes[1].append((p1.x,p1.y))\n for pos1x,pos1y in Snakes[1]:\n pygame.draw.rect(Game_Display, p1.cor , (pos1x, pos1y, l, l))\n \n if options[\"Number of Players\"] >= 2:\n if options[\"Limitless\"] == 1:\n (p2.x, p2.y) = go_out_of_bounds(p2.x, p2.y)\n else:\n exitgame = exitgame or crash (p2.x, p2.y)\n Snakes[2].append((p2.x,p2.y))\n for pos2x,pos2y in Snakes[2]:\n pygame.draw.rect(Game_Display, p2.cor , (pos2x, pos2y, l, l))\n \n if options[\"Number of Players\"] >= 3:\n if options[\"Limitless\"] == 1:\n (p3.x, p3.y) = go_out_of_bounds(p3.x, p3.y)\n else:\n exitgame = exitgame or crash (p3.x, p3.y)\n Snakes[3].append((p3.x,p3.y))\n for pos3x,pos3y in Snakes[3]:\n pygame.draw.rect(Game_Display, p3.cor , (pos3x, pos3y, l, l))\n \n if options[\"Number of Players\"] >= 4:\n if options[\"Limitless\"] == 1:\n (p4.x, p4.y) = go_out_of_bounds(p4.x, p4.y)\n else:\n exitgame = exitgame or crash (p4.x, p4.y)\n Snakes[4].append((p4.x,p4.y))\n for pos4x,pos4y in Snakes[4]:\n pygame.draw.rect(Game_Display, p4.cor , (pos4x, pos4y, l, l))\n \n if options[\"Number of Players\"] >= 5:\n if options[\"Limitless\"] == 1:\n (p5.x, p5.y) = go_out_of_bounds(p5.x, p5.y)\n else:\n exitgame = exitgame or crash (p5.x, p5.y)\n Snakes[5].append((p5.x,p5.y))\n for pos5x,pos5y in Snakes[5]:\n pygame.draw.rect(Game_Display, p5.cor , (pos5x, pos5y, l, l))\n \n # Verificar os eventos ocorridos, atualizar as direções, sair se necessário\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n d = Controls(event.key)\n if d != (0, \"stop\"):\n if d[0] == 1:\n if ((p1.direcao == \"stop\") or (p1.direcao == \"cima\" and d[1] != \"baixo\") or (p1.direcao == \"baixo\" and d[1] != \"cima\") or (p1.direcao == \"direita\" and d[1] != \"esquerda\") or (p1.direcao == \"esquerda\" and d[1] != \"direita\")):\n p1.direcao = d[1]\n elif d[0] == 2:\n if ((p2.direcao == \"stop\") or (p2.direcao == \"cima\" and d[1] != \"baixo\") or (p2.direcao == \"baixo\" and d[1] != \"cima\") or (p2.direcao == \"direita\" and d[1] != \"esquerda\") or (p2.direcao == \"esquerda\" and d[1] != \"direita\")):\n p2.direcao = d[1]\n elif d[0] == 3:\n if ((p3.direcao == \"stop\") or (p3.direcao == \"cima\" and d[1] != \"baixo\") or (p3.direcao == \"baixo\" and d[1] != \"cima\") or (p3.direcao == \"direita\" and d[1] != \"esquerda\") or (p3.direcao == \"esquerda\" and d[1] != \"direita\")):\n p3.direcao = d[1]\n elif d[0] == 4:\n if ((p4.direcao == \"stop\") or (p4.direcao == \"cima\" and d[1] != \"baixo\") or (p4.direcao == \"baixo\" and d[1] != \"cima\") or (p4.direcao == \"direita\" and d[1] != \"esquerda\") or (p4.direcao == \"esquerda\" and d[1] != \"direita\")):\n p4.direcao = d[1]\n elif d[0] == 5:\n if ((p5.direcao == \"stop\") or (p5.direcao == \"cima\" and d[1] != \"baixo\") or (p5.direcao == \"baixo\" and d[1] != \"cima\") or (p5.direcao == \"direita\" and d[1] != \"esquerda\") or (p5.direcao == \"esquerda\" and d[1] != \"direita\")):\n p5.direcao = d[1]\n \n if event.key == pygame.K_p:\n exitgame = exitgame or pause_menu()\n \n if event.key == pygame.K_ESCAPE:\n exitgame = True\n 
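# Editor's note: the condition chains above implement the usual snake rule that a\n # worm may never reverse 180 degrees in a single tick - while moving \"cima\" (up), a\n # \"baixo\" (down) keypress is ignored, and likewise for the left/right axis; only\n # 90-degree turns or keeping course are accepted.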
\n # Atualizar a posição\n if options[\"Number of Players\"] >= 1:\n (p1.x , p1.y) = update (p1)\n if options[\"Number of Players\"] >= 2:\n (p2.x , p2.y) = update (p2)\n if options[\"Number of Players\"] >= 3:\n (p3.x , p3.y) = update (p3)\n if options[\"Number of Players\"] >= 4:\n (p4.x , p4.y) = update (p4)\n if options[\"Number of Players\"] >= 5:\n (p5.x , p5.y) = update (p5)\n \n # Verificar se bateram na própria cauda\n if options[\"Number of Players\"] >= 1:\n exitgame = exitgame or check_collisions(Snakes[1])\n if options[\"Number of Players\"] >= 2:\n exitgame = exitgame or check_collisions(Snakes[2])\n if options[\"Number of Players\"] >= 3:\n exitgame = exitgame or check_collisions(Snakes[3])\n if options[\"Number of Players\"] >= 4:\n exitgame = exitgame or check_collisions(Snakes[4])\n if options[\"Number of Players\"] >= 5:\n exitgame = exitgame or check_collisions(Snakes[5])\n \n # Verificar se bateram umas nas outras\n if options[\"Collisions\"] == 1:\n if options[\"Number of Players\"] >= 2:\n exitgame = exitgame or check_collisions_inter_snakes(Snakes[2], Snakes[1])\n if options[\"Number of Players\"] >= 3:\n exitgame = exitgame or check_collisions_inter_snakes(Snakes[3], Snakes[1])\n exitgame = exitgame or check_collisions_inter_snakes(Snakes[3], Snakes[2])\n if options[\"Number of Players\"] >= 4:\n exitgame = exitgame or check_collisions_inter_snakes(Snakes[4], Snakes[1])\n exitgame = exitgame or check_collisions_inter_snakes(Snakes[4], Snakes[2])\n exitgame = exitgame or check_collisions_inter_snakes(Snakes[4], Snakes[3])\n if options[\"Number of Players\"] >= 5:\n exitgame = exitgame or check_collisions_inter_snakes(Snakes[5], Snakes[1])\n exitgame = exitgame or check_collisions_inter_snakes(Snakes[5], Snakes[2])\n exitgame = exitgame or check_collisions_inter_snakes(Snakes[5], Snakes[3])\n exitgame = exitgame or check_collisions_inter_snakes(Snakes[5], Snakes[4])\n \n # Verificar se comeram a bolacha\n if options[\"Number of Cookies\"] >= 1:\n eatcookie1 = eating_cookies(posC1x, posC1y)\n eatcookie2 = 0\n eatcookie3 = 0\n eatcookie4 = 0\n eatcookie5 = 0\n if options[\"Number of Cookies\"] >= 2:\n eatcookie2 = eating_cookies(posC2x, posC2y)\n if options[\"Number of Cookies\"] >= 3:\n eatcookie3 = eating_cookies(posC3x, posC3y)\n if options[\"Number of Cookies\"] >= 4:\n eatcookie4 = eating_cookies(posC4x, posC4y)\n if options[\"Number of Cookies\"] >= 5:\n eatcookie5 = eating_cookies(posC5x, posC5y)\n \n # Recolocar a bolacha no caso de ter sido comida\n if options[\"Number of Cookies\"] >= 1 and eatcookie1 != 0:\n (posC1x, posC1y) = create_cookie()\n score += 1\n if options[\"Number of Cookies\"] >= 2 and eatcookie2 != 0:\n (posC2x, posC2y) = create_cookie()\n # Verificar se não gerou a cookie num local onde já tinha outra cookie\n while (posC2x, posC2y) == (posC1x, posC1y):\n (posC2x, posC2y) = create_cookie()\n score += 1\n if options[\"Number of Cookies\"] >= 3 and eatcookie3 != 0:\n (posC3x, posC3y) = create_cookie()\n # Verificar se não gerou a cookie num local onde já tinha outra cookie\n while ((posC3x, posC3y) == (posC1x, posC1y)) or ((posC3x, posC3y) == (posC2x, posC2y)):\n (posC3x, posC3y) = create_cookie()\n score += 1\n if options[\"Number of Cookies\"] >= 4 and eatcookie4 != 0:\n (posC4x, posC4y) = create_cookie()\n # Verificar se não gerou a cookie num local onde já tinha outra cookie\n while ((posC4x, posC4y) == (posC1x, posC1y)) or ((posC4x, posC4y) == (posC2x, posC2y)) or ((posC4x, posC4y) == (posC3x, posC3y)):\n (posC4x, posC4y) = 
create_cookie()\n score += 1\n if options[\"Number of Cookies\"] >= 5 and eatcookie5 != 0:\n (posC5x, posC5y) = create_cookie()\n # Verificar se não gerou a cookie num local onde já tinha outra cookie\n while ((posC5x, posC5y) == (posC1x, posC1y)) or ((posC5x, posC5y) == (posC2x, posC2y)) or ((posC5x, posC5y) == (posC3x, posC3y)) or ((posC5x, posC5y) == (posC4x, posC4y)):\n (posC5x, posC5y) = create_cookie()\n score += 1\n \n # Reposicionar as minhoquinhas que não comeram bolachina\n if options[\"Number of Players\"] >= 1 and eatcookie1 != 1 and eatcookie2 != 1 and eatcookie3 != 1 and eatcookie4 != 1 and eatcookie5 != 1:\n Snakes[1] = Snakes[1][1:]\n if options[\"Number of Players\"] >= 2 and eatcookie1 != 2 and eatcookie2 != 2 and eatcookie3 != 2 and eatcookie4 != 2 and eatcookie5 != 2:\n Snakes[2] = Snakes[2][1:]\n if options[\"Number of Players\"] >= 3 and eatcookie1 != 3 and eatcookie2 != 3 and eatcookie3 != 3 and eatcookie4 != 3 and eatcookie5 != 3:\n Snakes[3] = Snakes[3][1:]\n if options[\"Number of Players\"] >= 4 and eatcookie1 != 4 and eatcookie2 != 4 and eatcookie3 != 4 and eatcookie4 != 4 and eatcookie5 != 4:\n Snakes[4] = Snakes[4][1:]\n if options[\"Number of Players\"] >= 5 and eatcookie1 != 5 and eatcookie2 != 5 and eatcookie3 != 5 and eatcookie4 != 5 and eatcookie5 != 5:\n Snakes[5] = Snakes[5][1:]\n \n timer.tick(options[\"Difficulty\"] * 10)\n pygame.display.update()\n \n return score\n \n score = maingame()\n return score\n\n\n# Menu 6\ndef options_menu():\n \n def clear_text(x, y, c, l):\n # x -> 1 a 7 / y -> 1 a 8\n # c é o comprimento em x / l é a largura em y\n part_x = screen_x // 7\n part_y = screen_y // 8\n pygame.draw.rect(Game_Display, preto, (int(((x - 0.5) * part_x) - c*screen_x/2), int(((y - 0.5) * part_y) - l*screen_y/2), c*screen_x, l*screen_y))\n pygame.display.update()\n return\n \n \n FileOptions = open(options_file, \"r\")\n conteudo = FileOptions.readlines()\n FileOptions.close()\n numplayers = int(conteudo[0].split()[-1]) - 1\n numcookies = int(conteudo[1].split()[-1]) - 1\n borders = int(conteudo[2].split()[-1])\n choques = int(conteudo[3].split()[-1])\n difficulty = int(conteudo[4].split()[-1])\n traducao = { 0 : \"NO\" , 1 : \"YES\" }\n dificuldade = { 0:\"I\", 1:\"II\", 2:\"III\", 3:\"IV\", 4:\"V\", 5:\"VI\", 6:\"VII\", 7:\"VIII\", 8:\"IX\", 9:\"X\" }\n \n exitmenu = False\n action = 0\n page = 0\n action_anterior = 0\n Game_Display.fill (preto)\n put_text(Game_Display, branco, 4, 2, \"SETTINGS\")\n put_text(Game_Display, branco, 3, 4, \"PLAYERS\")\n put_text(Game_Display, branco, 6, 4, str(numplayers + 1))\n put_text(Game_Display, branco, 3, 5, \"COOKIES\")\n put_text(Game_Display, branco, 6, 5, str(numcookies + 1))\n put_text(Game_Display, branco, 3, 6, \"DIFFICULTY\")\n put_text(Game_Display, branco, 6, 6, dificuldade[difficulty])\n put_text(Game_Display, branco, 4, 7, \". 
PAGE 1 >\")\n clear_text(3, 7, 135/1920, 90/1080)\n put_text(Game_Display, aqua , 4, 8, \"BACK\")\n pygame.display.update()\n \n while not exitmenu:\n for event in pygame.event.get():\n atualizar = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n action -= 1\n atualizar = True\n if event.key == pygame.K_DOWN:\n action += 1\n atualizar = True\n \n # Se alterar o valor, limpa o valor anterior antes\n if event.key == pygame.K_RIGHT:\n if action == 1 and page == 0:\n if action_anterior == 1:\n clear_text(6, 4, 100/1920, 100/1080)\n numplayers += 1\n atualizar = True\n elif action == 2 and page == 0:\n if action_anterior == 2:\n clear_text(6, 5, 100/1920, 100/1080)\n numcookies += 1\n atualizar = True\n elif action == 3 and page == 0:\n if action_anterior == 3:\n clear_text(6, 6, 150/1920, 90/1080)\n difficulty += 1\n atualizar = True\n elif action == 4 and page == 0:\n if action_anterior == 4:\n clear_text(3, 4, 1000/1920, 100/1080)\n clear_text(6, 4, 100/1920, 100/1080)\n clear_text(3, 5, 1000/1920, 100/1080)\n clear_text(6, 5, 100/1920, 100/1080)\n clear_text(3, 6, 1000/1920, 100/1080)\n clear_text(6, 6, 150/1920, 90/1080)\n clear_text(4, 7, 1000/1920, 100/1080)\n page = 1\n action = 3\n atualizar = True\n elif action == 1 and page == 1:\n if action_anterior == 1:\n clear_text(6, 4, 170/1920, 90/1080)\n borders += 1\n atualizar = True\n elif action == 2 and page == 1:\n if action_anterior == 2:\n clear_text(6, 5, 170/1920, 90/1080)\n choques += 1\n atualizar = True\n \n if event.key == pygame.K_LEFT:\n if action == 1 and page == 0:\n if action_anterior == 1:\n clear_text(6, 4, 100/1920, 100/1080)\n numplayers -= 1\n atualizar = True\n elif action == 2 and page == 0:\n if action_anterior == 2:\n clear_text(6, 5, 100/1920, 100/1080)\n numcookies -= 1\n atualizar = True\n elif action == 3 and page == 0:\n if action_anterior == 3:\n clear_text(6, 6, 150/1920, 90/1080)\n difficulty -= 1\n atualizar = True\n elif action == 1 and page == 1:\n if action_anterior == 1:\n clear_text(6, 4, 170/1920, 90/1080)\n borders-= 1\n atualizar = True\n elif action == 2 and page == 1:\n if action_anterior == 2:\n clear_text(6, 5, 170/1920, 90/1080)\n choques -= 1\n atualizar = True\n elif action == 3 and page == 1:\n if action_anterior == 3:\n clear_text(3, 4, 1000/1920, 100/1080)\n clear_text(6, 4, 170/1920, 90/1080)\n clear_text(3, 5, 1000/1920, 100/1080)\n clear_text(6, 5, 170/1920, 90/1080)\n clear_text(4, 7, 1000/1920, 100/1080)\n page = 0\n action = 4\n atualizar = True\n \n if event.key == pygame.K_RETURN and action == 0:\n exitmenu = True\n \n page = a_mod_b (page , 2)\n numplayers = a_mod_b (numplayers, 5)\n numcookies = a_mod_b (numcookies, 5)\n borders = a_mod_b (borders , 2)\n choques = a_mod_b (choques , 2)\n difficulty = a_mod_b (difficulty, 10)\n if page == 0:\n action = a_mod_b (action , 5)\n else:\n action = a_mod_b (action , 4)\n \n # Desenhar de novo o necessário\n if atualizar:\n if action == 0 and page == 0:\n put_text(Game_Display, branco, 3, 4, \"PLAYERS\")\n put_text(Game_Display, branco, 6, 4, str(numplayers + 1))\n put_text(Game_Display, branco, 3, 5, \"COOKIES\")\n put_text(Game_Display, branco, 6, 5, str(numcookies + 1))\n put_text(Game_Display, branco, 3, 6, \"DIFFICULTY\")\n put_text(Game_Display, branco, 6, 6, dificuldade[difficulty])\n put_text(Game_Display, branco, 4, 7, \" PAGE 1 >\")\n put_text(Game_Display, aqua , 4, 8, \"BACK\")\n elif action == 1 and page == 0:\n put_text(Game_Display, aqua , 3, 4, \"PLAYERS\")\n put_text(Game_Display, aqua , 6, 4, 
str(numplayers + 1))\n put_text(Game_Display, branco, 3, 5, \"COOKIES\")\n put_text(Game_Display, branco, 6, 5, str(numcookies + 1))\n put_text(Game_Display, branco, 3, 6, \"DIFFICULTY\")\n put_text(Game_Display, branco, 6, 6, dificuldade[difficulty])\n put_text(Game_Display, branco, 4, 7, \" PAGE 1 >\")\n put_text(Game_Display, branco, 4, 8, \"BACK\")\n elif action == 2 and page == 0:\n put_text(Game_Display, branco, 3, 4, \"PLAYERS\")\n put_text(Game_Display, branco, 6, 4, str(numplayers + 1))\n put_text(Game_Display, aqua , 3, 5, \"COOKIES\")\n put_text(Game_Display, aqua , 6, 5, str(numcookies + 1))\n put_text(Game_Display, branco, 3, 6, \"DIFFICULTY\")\n put_text(Game_Display, branco, 6, 6, dificuldade[difficulty])\n put_text(Game_Display, branco, 4, 7, \" PAGE 1 >\")\n put_text(Game_Display, branco, 4, 8, \"BACK\")\n elif action == 3 and page == 0:\n put_text(Game_Display, branco, 3, 4, \"PLAYERS\")\n put_text(Game_Display, branco, 6, 4, str(numplayers + 1))\n put_text(Game_Display, branco, 3, 5, \"COOKIES\")\n put_text(Game_Display, branco, 6, 5, str(numcookies + 1))\n put_text(Game_Display, aqua , 3, 6, \"DIFFICULTY\")\n put_text(Game_Display, aqua , 6, 6, dificuldade[difficulty])\n put_text(Game_Display, branco, 4, 7, \" PAGE 1 >\")\n put_text(Game_Display, branco, 4, 8, \"BACK\")\n elif action == 4 and page == 0:\n put_text(Game_Display, branco, 3, 4, \"PLAYERS\")\n put_text(Game_Display, branco, 6, 4, str(numplayers + 1))\n put_text(Game_Display, branco, 3, 5, \"COOKIES\")\n put_text(Game_Display, branco, 6, 5, str(numcookies + 1))\n put_text(Game_Display, branco, 3, 6, \"DIFFICULTY\")\n put_text(Game_Display, branco, 6, 6, dificuldade[difficulty])\n put_text(Game_Display, aqua , 4, 7, \" PAGE 1 >\")\n put_text(Game_Display, branco, 4, 8, \"BACK\")\n elif action == 0 and page == 1:\n put_text(Game_Display, branco, 3, 4, \"BORDERS\")\n put_text(Game_Display, branco, 6, 4, traducao[borders])\n put_text(Game_Display, branco, 3, 5, \"COLLISIONS\")\n put_text(Game_Display, branco, 6, 5, traducao[choques])\n put_text(Game_Display, branco, 4, 7, \"< PAGE 2 \")\n put_text(Game_Display, aqua , 4, 8, \"BACK\")\n elif action == 1 and page == 1:\n put_text(Game_Display, aqua , 3, 4, \"BORDERS\")\n put_text(Game_Display, aqua , 6, 4, traducao[borders])\n put_text(Game_Display, branco, 3, 5, \"COLLISIONS\")\n put_text(Game_Display, branco, 6, 5, traducao[choques])\n put_text(Game_Display, branco, 4, 7, \"< PAGE 2 \")\n put_text(Game_Display, branco, 4, 8, \"BACK\")\n elif action == 2 and page == 1:\n put_text(Game_Display, branco, 3, 4, \"BORDERS\")\n put_text(Game_Display, branco, 6, 4, traducao[borders])\n put_text(Game_Display, aqua , 3, 5, \"COLLISIONS\")\n put_text(Game_Display, aqua , 6, 5, traducao[choques])\n put_text(Game_Display, branco, 4, 7, \"< PAGE 2 \")\n put_text(Game_Display, branco, 4, 8, \"BACK\")\n elif action == 3 and page == 1:\n put_text(Game_Display, branco, 3, 4, \"BORDERS\")\n put_text(Game_Display, branco, 6, 4, traducao[borders])\n put_text(Game_Display, branco, 3, 5, \"COLLISIONS\")\n put_text(Game_Display, branco, 6, 5, traducao[choques])\n put_text(Game_Display, aqua , 4, 7, \"< PAGE 2 \")\n put_text(Game_Display, branco, 4, 8, \"BACK\")\n \n action_anterior = action\n pygame.display.update()\n timer.tick(menu_fps)\n \n FileOptions = open(options_file, \"w\")\n FileOptions.write(\"Number of Players: {0}\\nNumber of Cookies: {1}\\nLimits: {2}\\nCollision: {3}\\nDifficulty: {4}\".format(numplayers + 1, numcookies + 1, borders, choques, difficulty))\n 
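# (note) the file written above holds one \"Key: value\" pair per line, e.g. \"Number of Players: 3\",\n # which the int(line.split()[-1]) calls at the top of this menu parse back in\n 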
FileOptions.close()\n \n return\n\n\n# Menu 8\ndef configs_menu():\n menu_to_open = 0\n exitmenu = False\n Game_Display.fill(preto)\n put_text(Game_Display, branco, 4, 2, \"OPTIONS\")\n pygame.display.update()\n while not exitmenu:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n exitmenu = True\n break\n if event.key == pygame.K_UP:\n menu_to_open -= 1\n if event.key == pygame.K_DOWN:\n menu_to_open += 1\n menu_to_open = a_mod_b(menu_to_open, 4)\n if menu_to_open == 0:\n put_text(Game_Display, branco, 4, 4, \"SETTINGS\")\n put_text(Game_Display, branco, 4, 5, \"CONTROLS\")\n put_text(Game_Display, branco, 4, 6, \"COLOURS\")\n put_text(Game_Display, aqua , 4, 7, \"BACK\")\n elif menu_to_open == 1:\n put_text(Game_Display, aqua , 4, 4, \"SETTINGS\")\n put_text(Game_Display, branco, 4, 5, \"CONTROLS\")\n put_text(Game_Display, branco, 4, 6, \"COLOURS\")\n put_text(Game_Display, branco, 4, 7, \"BACK\")\n elif menu_to_open == 2:\n put_text(Game_Display, branco, 4, 4, \"SETTINGS\")\n put_text(Game_Display, aqua , 4, 5, \"CONTROLS\")\n put_text(Game_Display, branco, 4, 6, \"COLOURS\")\n put_text(Game_Display, branco, 4, 7, \"BACK\")\n elif menu_to_open == 3:\n put_text(Game_Display, branco, 4, 4, \"SETTINGS\")\n put_text(Game_Display, branco, 4, 5, \"CONTROLS\")\n put_text(Game_Display, aqua , 4, 6, \"COLOURS\")\n put_text(Game_Display, branco, 4, 7, \"BACK\")\n pygame.display.update()\n timer.tick(menu_fps)\n return menu_to_open\n\n\n# Menu 9\ndef controls_menu():\n exitmenu = False\n Game_Display.fill(preto)\n \n put_text(Game_Display, branco, 4, 2, \"CONTROLS\")\n put_text(Game_Display, branco, 2, 3, \"Player 1\")\n put_text(Game_Display, branco, 2, 4, \"Player 2\")\n put_text(Game_Display, branco, 2, 5, \"Player 3\")\n put_text(Game_Display, branco, 2, 6, \"Player 4\")\n put_text(Game_Display, branco, 2, 7, \"Player 5\")\n put_text(Game_Display, branco, 6, 3, \" ARROWS\")\n put_text(Game_Display, branco, 6, 4, \"W S A D\")\n put_text(Game_Display, branco, 6, 5, \"T G F H\")\n put_text(Game_Display, branco, 6, 6, \"I K J L\")\n put_text(Game_Display, branco, 6, 7, \"8 5 4 6\")\n put_text(Game_Display, aqua , 4, 8, \"BACK\")\n pygame.display.update()\n \n while not exitmenu:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n exitmenu = True\n break\n timer.tick(menu_fps)\n return\n\n\n# Menu 10\ndef colours_menu():\n \n def put_circle(display, cor, x, y, r):\n part_x = screen_x // 7\n part_y = screen_y // 8\n pygame.draw.circle(display, cor, (int(((x - 0.5) * part_x)) , int(((y - 0.5) * part_y))) , r, 0)\n pygame.display.update()\n return\n \n FileColours = open(colours_file, \"r\")\n conteudo = FileColours.readlines()\n FileColours.close()\n \n cor1 = int(conteudo[0].split()[-1])\n cor2 = int(conteudo[1].split()[-1])\n cor3 = int(conteudo[2].split()[-1])\n cor4 = int(conteudo[3].split()[-1])\n cor5 = int(conteudo[4].split()[-1])\n \n exitmenu = False\n action = 0\n Game_Display.fill (preto)\n put_text(Game_Display, branco, 4, 2, \"COLOURS\")\n\n pygame.display.update()\n \n while not exitmenu:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n action -= 1\n if event.key == pygame.K_DOWN:\n action += 1\n \n if event.key == pygame.K_RIGHT:\n if action == 1:\n cor1 += 1\n elif action == 2:\n cor2 += 1\n elif action == 3:\n cor3 += 1\n elif action == 4:\n cor4 += 1\n elif action == 5:\n cor5 += 1\n \n if event.key == pygame.K_LEFT:\n 
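# Left arrow: decrement the value under the cursor; a_mod_b below wraps every value back into its valid range\n 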
if action == 1:\n cor1 -= 1\n elif action == 2:\n cor2 -= 1\n elif action == 3:\n cor3 -= 1\n elif action == 4:\n cor4 -= 1\n elif action == 5:\n cor5 -= 1\n \n if event.key == pygame.K_RETURN and action == 0:\n exitmenu = True\n \n cor1 = a_mod_b (cor1, len(Colours))\n cor2 = a_mod_b (cor2, len(Colours))\n cor3 = a_mod_b (cor3, len(Colours))\n cor4 = a_mod_b (cor4, len(Colours))\n cor5 = a_mod_b (cor5, len(Colours))\n action = a_mod_b(action, 6)\n \n # Draw what is needed\n if action == 0:\n put_text(Game_Display, branco, 3, 3, \"PLAYER 1\")\n put_circle(Game_Display, Colours[cor1], 6, 3, 50)\n put_text(Game_Display, branco, 3, 4, \"PLAYER 2\")\n put_circle(Game_Display, Colours[cor2], 6, 4, 50)\n put_text(Game_Display, branco, 3, 5, \"PLAYER 3\")\n put_circle(Game_Display, Colours[cor3], 6, 5, 50)\n put_text(Game_Display, branco, 3, 6, \"PLAYER 4\")\n put_circle(Game_Display, Colours[cor4], 6, 6, 50)\n put_text(Game_Display, branco, 3, 7, \"PLAYER 5\")\n put_circle(Game_Display, Colours[cor5], 6, 7, 50)\n put_text(Game_Display, aqua , 3, 8, \"BACK\")\n elif action == 1:\n put_text(Game_Display, aqua , 3, 3, \"PLAYER 1\")\n put_circle(Game_Display, Colours[cor1], 6, 3, 50)\n put_text(Game_Display, branco, 3, 4, \"PLAYER 2\")\n put_circle(Game_Display, Colours[cor2], 6, 4, 50)\n put_text(Game_Display, branco, 3, 5, \"PLAYER 3\")\n put_circle(Game_Display, Colours[cor3], 6, 5, 50)\n put_text(Game_Display, branco, 3, 6, \"PLAYER 4\")\n put_circle(Game_Display, Colours[cor4], 6, 6, 50)\n put_text(Game_Display, branco, 3, 7, \"PLAYER 5\")\n put_circle(Game_Display, Colours[cor5], 6, 7, 50)\n put_text(Game_Display, branco, 3, 8, \"BACK\")\n elif action == 2:\n put_text(Game_Display, branco, 3, 3, \"PLAYER 1\")\n put_circle(Game_Display, Colours[cor1], 6, 3, 50)\n put_text(Game_Display, aqua , 3, 4, \"PLAYER 2\")\n put_circle(Game_Display, Colours[cor2], 6, 4, 50)\n put_text(Game_Display, branco, 3, 5, \"PLAYER 3\")\n put_circle(Game_Display, Colours[cor3], 6, 5, 50)\n put_text(Game_Display, branco, 3, 6, \"PLAYER 4\")\n put_circle(Game_Display, Colours[cor4], 6, 6, 50)\n put_text(Game_Display, branco, 3, 7, \"PLAYER 5\")\n put_circle(Game_Display, Colours[cor5], 6, 7, 50)\n put_text(Game_Display, branco, 3, 8, \"BACK\")\n elif action == 3:\n put_text(Game_Display, branco, 3, 3, \"PLAYER 1\")\n put_circle(Game_Display, Colours[cor1], 6, 3, 50)\n put_text(Game_Display, branco, 3, 4, \"PLAYER 2\")\n put_circle(Game_Display, Colours[cor2], 6, 4, 50)\n put_text(Game_Display, aqua , 3, 5, \"PLAYER 3\")\n put_circle(Game_Display, Colours[cor3], 6, 5, 50)\n put_text(Game_Display, branco, 3, 6, \"PLAYER 4\")\n put_circle(Game_Display, Colours[cor4], 6, 6, 50)\n put_text(Game_Display, branco, 3, 7, \"PLAYER 5\")\n put_circle(Game_Display, Colours[cor5], 6, 7, 50)\n put_text(Game_Display, branco, 3, 8, \"BACK\")\n elif action == 4:\n put_text(Game_Display, branco, 3, 3, \"PLAYER 1\")\n put_circle(Game_Display, Colours[cor1], 6, 3, 50)\n put_text(Game_Display, branco, 3, 4, \"PLAYER 2\")\n put_circle(Game_Display, Colours[cor2], 6, 4, 50)\n put_text(Game_Display, branco, 3, 5, \"PLAYER 3\")\n put_circle(Game_Display, Colours[cor3], 6, 5, 50)\n put_text(Game_Display, aqua , 3, 6, \"PLAYER 4\")\n put_circle(Game_Display, Colours[cor4], 6, 6, 50)\n put_text(Game_Display, branco, 3, 7, \"PLAYER 5\")\n put_circle(Game_Display, Colours[cor5], 6, 7, 50)\n put_text(Game_Display, branco, 3, 8, \"BACK\")\n elif action == 5:\n put_text(Game_Display, branco, 3, 3, \"PLAYER 1\")\n put_circle(Game_Display, 
Colours[cor1], 6, 3, 50)\n put_text(Game_Display, branco, 3, 4, \"PLAYER 2\")\n put_circle(Game_Display, Colours[cor2], 6, 4, 50)\n put_text(Game_Display, branco, 3, 5, \"PLAYER 3\")\n put_circle(Game_Display, Colours[cor3], 6, 5, 50)\n put_text(Game_Display, branco, 3, 6, \"PLAYER 4\")\n put_circle(Game_Display, Colours[cor4], 6, 6, 50)\n put_text(Game_Display, aqua , 3, 7, \"PLAYER 5\")\n put_circle(Game_Display, Colours[cor5], 6, 7, 50)\n put_text(Game_Display, branco, 3, 8, \"BACK\")\n \n pygame.display.update()\n timer.tick(menu_fps)\n \n FileColours = open(colours_file, \"w\")\n FileColours.write(\"Player 1: {0}\\nPlayer 2: {1}\\nPlayer 3: {2}\\nPlayer 4: {3}\\nPlayer 5: {4}\".format(cor1, cor2, cor3, cor4, cor5))\n FileColours.close()\n \n return\n\n\n# Program options loop\ndef options_loop():\n exit_options = False\n while not exit_options:\n leave_options_menu = configs_menu()\n if leave_options_menu == 0:\n exit_options = True\n elif leave_options_menu == 1:\n options_menu()\n elif leave_options_menu == 2:\n controls_menu()\n elif leave_options_menu == 3:\n colours_menu()\n return\n\n\nloop()\npygame.quit()\n","sub_path":"Minhocada.py","file_name":"Minhocada.py","file_ext":"py","file_size_in_byte":53294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"53723156","text":"# invoke as python databaseindexcreation.py ../resources/datafile.txt ../out ../out\n\nimport sys\ninputFile = open(sys.argv[1], 'r')\ndatain = open(sys.argv[2] + '/data.in', 'w')\nlongseq = ''\nfor line in inputFile:\n if line.startswith('>'):\n gi = line.split('|')[1]\n if longseq != '':\n longseq += '@'\n datain.writelines(gi + '\\t' + str(len(longseq)) + '\\n')\n else:\n longseq += line.replace('\\n', '')\noutputFile = open(sys.argv[3] + '/data.seq', 'w')\noutputFile.write(longseq)","sub_path":"src/Assignment1/databaseindexcreation.py","file_name":"databaseindexcreation.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"122745752","text":"from torch import Tensor\nfrom sybric.utils import tile\n\n\ndef test_tile():\n y = Tensor([0, 1, 2])\n y_tiled = tile(y, 3)\n expected = Tensor([[0, 0, 0],\n [1, 1, 1], \n [2, 2, 2]])\n assert (y_tiled == expected).all()\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"26263225","text":"import ftoml as toml\n\nfrom .resources import Resources\nfrom .entity_group import EntityGroup\n\ndef load_level(level: str, group: EntityGroup, resources: Resources, constructors):\n \"\"\"Loads the level in the given file into the given group\"\"\"\n with open(level) as f:\n data = toml.loads(f.read())\n \n for resource_file in data.get(\"resources\", []):\n print(\"Loading resource {!r}\".format(resource_file))\n resources.load(resource_file+\".toml\")\n \n for ent in data.get(\"entities\", []):\n t = ent[\"type\"]\n if t not in constructors:\n raise ValueError(\"Class {!r} not found!\".format(t))\n constructor = constructors[t]\n if type(constructor) is not type:\n raise TypeError(\"{!r} ({}) is not a valid constructor!\".format(\n t, type(constructor)\n ))\n x, y = ent[\"pos\"]\n entity = constructor(x, y, resources)\n if \"layer\" in ent:\n group.add(entity, draw_layer=ent[\"layer\"])\n else:\n group.add(entity)\n \n if \"tags\" in ent:\n group.add_tags(entity, 
*ent[\"tags\"])\n ","sub_path":"level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"537603564","text":"import datetime\nimport decimal\n\nfrom ...entities.commit import Commit\nfrom ...entities.history import get_history\nfrom ...tests.api import _fixtures\nfrom ...tests.helpers import _uuid\n\nREPO = \"https://github.com/org/something\"\nMACHINE = \"diana-2-4-17179869184\"\n\n\ndef test_history():\n commit_1 = Commit.create(\n {\n \"sha\": \"x11111\",\n \"repository\": REPO,\n \"parent\": \"00000\",\n \"timestamp\": datetime.datetime(2021, 11, 1),\n \"message\": \"message 11111\",\n \"author_name\": \"author_name\",\n \"author_login\": \"author_login\",\n \"author_avatar\": \"author_avatar\",\n }\n )\n commit_2 = Commit.create(\n {\n \"sha\": \"x22222\",\n \"repository\": REPO,\n \"parent\": \"11111\",\n \"timestamp\": datetime.datetime(2021, 11, 2),\n \"message\": \"message 22222\",\n \"author_name\": \"author_name\",\n \"author_login\": \"author_login\",\n \"author_avatar\": \"author_avatar\",\n }\n )\n commit_3 = Commit.create(\n {\n \"sha\": \"x33333\",\n \"repository\": REPO,\n \"parent\": \"22222\",\n \"timestamp\": datetime.datetime(2021, 11, 3),\n \"message\": \"message 33333\",\n \"author_name\": \"author_name\",\n \"author_login\": \"author_login\",\n \"author_avatar\": \"author_avatar\",\n }\n )\n commit_4 = Commit.create(\n {\n \"sha\": \"x44444\",\n \"repository\": REPO,\n \"parent\": \"33333\",\n \"timestamp\": datetime.datetime(2021, 11, 4),\n \"message\": \"message 44444\",\n \"author_name\": \"author_name\",\n \"author_login\": \"author_login\",\n \"author_avatar\": \"author_avatar\",\n }\n )\n\n name = _uuid()\n data = [2.1, 2.0, 1.99] # first commit\n summary_1 = _fixtures.summary(results=data, commit=commit_1, name=name)\n\n data = [1.99, 2.0, 2.1] # stayed the same\n summary_2 = _fixtures.summary(results=data, commit=commit_2, name=name)\n\n data = [1.1, 1.0, 0.99] # got better\n summary_3 = _fixtures.summary(results=data, commit=commit_3, name=name)\n\n data = [1.2, 1.1, 1.0] # stayed about the same\n summary_4 = _fixtures.summary(results=data, commit=commit_4, name=name)\n\n data = [3.1, 3.0, 2.99] # measure commit 4 twice\n summary_5 = _fixtures.summary(results=data, commit=commit_4, name=name)\n\n data, name = [5.1, 5.2, 5.3], \"different-case\"\n _fixtures.summary(results=data, commit=commit_1, name=name)\n\n data, language = [6.1, 6.2, 6.3], \"different-context\"\n _fixtures.summary(results=data, commit=commit_1, name=name, language=language)\n\n data, machine = [7.1, 7.2, 7.3], \"different-machine\"\n _fixtures.summary(results=data, commit=commit_1, name=name, machine=machine)\n\n data = [8.1, 8.2, 8.3] # pull request, exclude from history\n _fixtures.summary(results=data, commit=commit_1, name=name, pull_request=True)\n\n assert summary_1.case_id == summary_2.case_id\n assert summary_1.case_id == summary_3.case_id\n assert summary_1.case_id == summary_4.case_id\n assert summary_1.case_id == summary_5.case_id\n\n assert summary_1.run.machine_id == summary_2.run.machine_id\n assert summary_1.run.machine_id == summary_3.run.machine_id\n assert summary_1.run.machine_id == summary_4.run.machine_id\n assert summary_1.run.machine_id == summary_5.run.machine_id\n\n case_id = summary_1.case_id\n context_id = summary_1.context_id\n machine_hash = summary_1.run.machine.hash\n\n # ----- get_commit_index\n\n expected = [\n (\n summary_1.id,\n 
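 # each tuple is one expected get_history row: summary/case/context ids, mean, unit, machine hash, commit fields, rolling mean and stddev, and run name\n 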
case_id,\n context_id,\n summary_1.mean,\n \"s\",\n machine_hash,\n commit_1.sha,\n REPO,\n \"message 11111\",\n datetime.datetime(2021, 11, 1),\n decimal.Decimal(\"2.0300000000000000\"),\n None,\n summary_1.run.name,\n ),\n (\n summary_2.id,\n case_id,\n context_id,\n summary_2.mean,\n \"s\",\n machine_hash,\n commit_2.sha,\n REPO,\n \"message 22222\",\n datetime.datetime(2021, 11, 2),\n decimal.Decimal(\"2.0300000000000000\"),\n decimal.Decimal(\"0\"),\n summary_2.run.name,\n ),\n (\n summary_3.id,\n case_id,\n context_id,\n summary_3.mean,\n \"s\",\n machine_hash,\n commit_3.sha,\n REPO,\n \"message 33333\",\n datetime.datetime(2021, 11, 3),\n decimal.Decimal(\"1.6966666666666667\"),\n decimal.Decimal(\"0.57735026918962576451\"),\n summary_3.run.name,\n ),\n (\n summary_4.id,\n case_id,\n context_id,\n summary_4.mean,\n \"s\",\n machine_hash,\n commit_4.sha,\n REPO,\n \"message 44444\",\n datetime.datetime(2021, 11, 4),\n decimal.Decimal(\"1.8440000000000000\"),\n decimal.Decimal(\"0.82035358230460601799\"),\n summary_4.run.name,\n ),\n (\n summary_5.id,\n case_id,\n context_id,\n summary_5.mean,\n \"s\",\n machine_hash,\n commit_4.sha,\n REPO,\n \"message 44444\",\n datetime.datetime(2021, 11, 4),\n decimal.Decimal(\"1.8440000000000000\"),\n decimal.Decimal(\"0.82035358230460601799\"),\n summary_5.run.name,\n ),\n ]\n actual = get_history(case_id, context_id, machine_hash)\n assert len(actual) == len(expected)\n assert set(actual) == set(expected)\n","sub_path":"conbench/tests/entities/test_history.py","file_name":"test_history.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"650445398","text":"# #################################################################\n#\n# pgAdmin 4 - PostgreSQL Tools\n#\n# Copyright (C) 2013 - 2016, The pgAdmin Development Team\n# This software is released under the PostgreSQL Licence\n#\n# ##################################################################\n\nimport json\n\nfrom pgadmin.utils.route import BaseTestGenerator\nfrom regression import test_utils as utils\n\n\nclass DatabaseAddTestCase(BaseTestGenerator):\n \"\"\"\n This class checks that the server group node is present on the object\n browser's tree, based on the response code.\n \"\"\"\n\n scenarios = [\n # Fetching default URL for database node.\n ('Check Databases Node URL', dict(url='/browser/database/obj/'))\n ]\n\n def setUp(self):\n \"\"\"\n This function is used to add the server\n\n :return: None\n \"\"\"\n\n # Add the server\n utils.add_server(self.tester)\n\n def runTest(self):\n \"\"\" This function will add a database under the 1st server of the tree node. 
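It sends one POST per connected server and checks each response status.\n 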
\"\"\"\n\n server_connect_response, server_group, server_ids = \\\n utils.connect_server(self.tester)\n\n for server_connect, server_id in zip(server_connect_response,\n server_ids):\n if server_connect['data']['connected']:\n data = utils.get_db_data(server_connect)\n db_response = self.tester.post(self.url + str(server_group) +\n \"/\" + server_id + \"/\",\n data=json.dumps(data),\n content_type='html/json')\n self.assertEqual(db_response.status_code, 200)\n response_data = json.loads(db_response.data.decode('utf-8'))\n utils.write_db_parent_id(response_data)\n\n def tearDown(self):\n \"\"\"\n This function deletes the added database, the added server and the\n 'parent_id.pkl' file which is created in setUp()\n\n :return: None\n \"\"\"\n\n utils.delete_database(self.tester)\n utils.delete_server(self.tester)\n utils.delete_parent_id_file()\n","sub_path":"web/pgadmin/browser/server_groups/servers/databases/tests/test_db_add.py","file_name":"test_db_add.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"97431703","text":"import numpy as np\nimport torch\nimport torch.optim as optim\n\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.cuda.amp import autocast\n\nimport time\nimport os\n\nimport core\nfrom core import dcum2 as discounted_cumsum\n\nfrom grid2op.Agent import AgentWithConverter\nfrom grid2op.Converter import IdToAct\n\n\nclass ReplayBuffer:\n \"\"\"\n Transitions buffer\n Stores transitions for a single episode\n \"\"\"\n\n def __init__(self,\n act_dim,\n obs_dim,\n size=4000,\n gamma=.98,\n lamda=.95,\n device=None):\n self.size = size\n self.gamma = gamma\n self.lamda = lamda\n\n self.rewards = np.zeros([size], dtype=np.float32)\n self.actions = np.zeros([size, act_dim], dtype=np.float32)\n self.states = np.zeros([size, obs_dim], dtype=np.float32)\n\n self.log_prob = np.zeros([size], dtype=np.float32)\n self.adv = np.zeros([size], dtype=np.float32)\n self.vals = np.zeros([size], dtype=np.float32)\n\n self.ptr, self.eps_end_ptr = 0, 0\n self.device = device\n\n def store(self, act, states, values, rew, log_p):\n \"\"\"\n Store transitions\n \"\"\"\n idx = self.ptr % self.size\n\n self.rewards[idx] = rew\n self.actions[idx] = act\n self.states[idx] = states\n self.vals[idx] = values\n self.log_prob[idx] = log_p\n\n self.ptr += 1\n\n def get(self):\n \"\"\"\n Returns episode transitions\n \"\"\"\n assert self.ptr >= self.size\n\n self.ptr = 0\n self.eps_end_ptr = 0\n return torch.from_numpy(self.actions).to(self.device), torch.from_numpy(self.rewards).to(self.device), \\\n torch.from_numpy(self.states).to(self.device), torch.from_numpy(\n self.adv).to(self.device), torch.from_numpy(self.log_prob).to(self.device)\n\n def end_eps(self, value=0):\n \"\"\"\n Calculates the adv once the agent\n encounters an end state\n\n value: value of that state -> zero if the agent\n died or the value function if the episode was terminated\n \"\"\"\n idx = slice(self.eps_end_ptr, self.ptr)\n\n rew = np.append(self.rewards[idx], value)\n vals = np.append(self.vals[idx], value)\n\n # GAE\n deltas = rew[:-1] + self.gamma * vals[1:] - vals[:-1]\n self.adv[idx] = discounted_cumsum(deltas, self.gamma * self.lamda)\n self.adv = (self.adv - self.adv.mean()) / self.adv.std()\n\n # Reward to go\n self.rewards[idx] = discounted_cumsum(rew, self.gamma)[:-1]\n\n self.eps_end_ptr = self.ptr\n\n\nclass PPOAgent(AgentWithConverter):\n def __init__(self,\n env,\n observation_space,\n action_space,\n
actor_class=core.MLPActor,\n **args):\n super(PPOAgent, self).__init__(action_space,\n action_space_converter=IdToAct,\n **args['kwargs_converters'])\n \"\"\"\n actor_args: hidden_size(list), size(int)-network size, pi_lr, v_lr\n max_lr: Max kl divergence between new and old polices (0.01 - 0.05)\n Triggers early stopping for pi training\n \"\"\"\n\n self.args = args\n self.env = env\n self.device = args['device']\n\n if args['filter_acts']:\n self.filter_acts = True\n print('Filtering actions..')\n self.action_space.filter_action(self._filter_act)\n print('Done')\n else:\n self.filter_acts = False\n\n act_dim = self.get_action_size(self.action_space)\n\n if self.args['filter_obs']:\n print('filtering observations..')\n # For obs extraction\n self._tmp_obs, self._indx_obs = None, None\n obs_dim = self._get_obs_size(observation_space)\n\n self.extract_obs(observation_space)\n print('done\\n')\n\n self.filter_obs = True\n else:\n obs_dim = observation_space.size()\n self.filter_obs = False\n\n print('dims', 'obs: ', obs_dim, ' act: ', act_dim)\n\n self.actor = actor_class(obs_dim,\n act_dim,\n discrete=True,\n device=self.device,\n **args['ac_args']).to(self.device)\n params = [\n core.count(module) for module in (self.actor.pi, self.actor.v)\n ]\n print(f'\\nParameters\\npi: {params[0]} v: { params[1] }')\n\n self.memory = ReplayBuffer(act_dim,\n obs_dim,\n args['steps_per_epoch'],\n lamda=args['lamda'],\n gamma=args['gamma'],\n device=self.device)\n\n self.training = self.args['training']\n\n self.max_kl = self.args['max_kl_start']\n self.min_kl = self.args['min_kl_stop']\n\n self.pi_optimizer = optim.Adam(self.actor.pi.parameters(),\n args['pi_lr'])\n\n if self.args['schedule_pi_lr']:\n self.pi_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n self.pi_optimizer,\n patience=5,\n verbose=True\n # min_lr=self.args['min_pi_lr']\n # T_max=self.args['max_pi_epoch'], eta_min=self.args['min_pi_lr']\n )\n self.v_optimizer = optim.Adam(self.actor.v.parameters(), args['v_lr'])\n\n # Hold epoch losses for logging\n self.pi_losses, self.v_losses, self.delta_v_logs, self.delta_pi_logs = [], [], [], []\n self.pi_kl = [] # kls for logging\n self.v_logs = []\n self.first_run_ret = None\n\n run_t = time.strftime('%Y-%m-%d-%H-%M-%S')\n self.path_time = time.strftime('%Y-%m-%d') # For model path\n path = os.path.join('data',\n env.name + args.get('env_name', '') + '_' + run_t)\n\n self.logger = SummaryWriter(log_dir=path)\n self.pi_scaler = torch.cuda.amp.GradScaler()\n self.v_scaler = torch.cuda.amp.GradScaler()\n\n print('\\n..Init done')\n\n def _get_obs_size(self, obs_space):\n \"\"\"\n Get the dimension of an observation\n given the extracted observation attributes\n \"\"\"\n size = 0\n\n for attr_name in self.args['obs_attributes']:\n start, end, _ = obs_space.get_indx_extract(attr_name)\n size += (end - start)\n\n return size\n\n def get_action_size(self, act_space):\n \"\"\"\n Gives the size of the action space\n after the extracted action attributes\n \"\"\"\n convertor = IdToAct(act_space)\n convertor.init_converter(**self.args['kwargs_converters'])\n\n if self.filter_acts:\n convertor.filter_action(self._filter_act)\n\n return convertor.n\n\n def _compute_pi_loss(self, log_p_old, adv_b, act_b, obs_b):\n \"\"\"\n Pi loss\n \"\"\"\n clip_ratio = self.args['clip_ratio']\n\n with autocast():\n # returns new_pi_normal_distribution, logp_act\n _, log_p_ = self.actor.pi(obs_b, act_b)\n log_p_ = log_p_.type(torch.float32) # From torch.float64\n\n pi_ratio = torch.exp(log_p_ - log_p_old)\n min_adv 
= torch.where(adv_b >= 0, (1 + clip_ratio) * adv_b,\n (1 - clip_ratio) * adv_b)\n\n pi_loss = -torch.mean(torch.min(pi_ratio * adv_b, min_adv))\n\n return pi_loss, (log_p_old - log_p_).mean().item() # kl\n\n def _compute_v_loss(self, data):\n \"\"\"\n Value function loss\n \"\"\"\n obs_b, rew_b = data['obs_b'], data['rew_b']\n\n with autocast():\n v_pred = self.actor.v(obs_b)\n v_loss = ((v_pred - rew_b)**2).mean()\n\n return v_loss\n\n def get_kl(self, itr):\n \"\"\"\n Return KL target based on current epoch\n \"\"\"\n T_epoch = self.args['kl_fin_epoch']\n\n if itr > T_epoch:\n return self.min_kl\n\n rate = (self.max_kl - self.min_kl) / T_epoch\n\n return self.max_kl - (rate * itr)\n\n def _filter_act(self, action):\n \"\"\"\n Wrapper to Filter the action space\n Passed to self.filter_action\n \"\"\"\n max_elem = 2\n\n act_dict = action.impact_on_objects()\n elem = 0\n elem += act_dict[\"force_line\"][\"reconnections\"][\"count\"]\n elem += act_dict[\"force_line\"][\"disconnections\"][\"count\"]\n elem += act_dict[\"switch_line\"][\"count\"]\n elem += len(act_dict[\"topology\"][\"bus_switch\"])\n elem += len(act_dict[\"topology\"][\"assigned_bus\"])\n elem += len(act_dict[\"topology\"][\"disconnect_bus\"])\n elem += len(act_dict[\"redispatch\"][\"generators\"])\n\n if elem <= max_elem:\n return True\n return False\n\n def extract_obs(self, obs_space):\n \"\"\"\n Initializes the observation by extracting the\n listed attribute names selected to represent the\n observation.\n \"\"\"\n\n tmp = np.zeros(0, dtype=np.uint) # TODO platform independant\n for obs_attr_name in self.args['obs_attributes']:\n beg_, end_, _ = obs_space.get_indx_extract(obs_attr_name)\n tmp = np.concatenate((tmp, np.arange(beg_, end_, dtype=np.uint)))\n self._indx_obs = tmp\n self._tmp_obs = np.zeros((1, tmp.shape[0]), dtype=np.float32)\n\n def convert_obs(self, observation):\n \"\"\"\n Overrides super:\n\n Converts an observation into a vector then\n selects the attribues identified to represent\n the observation\n \"\"\"\n\n obs_vec = observation.to_vect()\n if self.filter_obs:\n self._tmp_obs[:] = obs_vec[self._indx_obs]\n return self._tmp_obs\n\n return obs_vec\n\n def my_act(self, transformed_obs, reward=None, done=False):\n \"\"\"\n Used by the agent to decide on action to take\n\n Returns an `encoded_action` which is reconverted\n by the inherited `self.convert_act` into a valid\n action that can be taken in the env\n\n\n \"\"\"\n\n act = self.predict_action(transformed_obs)\n\n return act\n\n def train(self):\n \"\"\"\n Trains actor\n \"\"\"\n self.run_training_loop()\n\n def _update(self, epoch):\n \"\"\"\n Update the policy and value function from loss\n \"\"\"\n data = self.memory.get()\n act_b, rew_b, obs_b, adv_b, log_p_old = data\n train_args = self.args\n\n # loss before update\n pi_loss_old, kl = self._compute_pi_loss(log_p_old=log_p_old,\n obs_b=obs_b,\n adv_b=adv_b,\n act_b=act_b)\n\n v_loss_old = self._compute_v_loss({\n 'obs_b': obs_b,\n 'rew_b': rew_b\n }).item()\n\n kl_target = self.get_kl(\n epoch) if self.args['anneal_kl'] else self.args['target_kl']\n\n for i in range(train_args['pi_train_n_iters']):\n self.pi_optimizer.zero_grad()\n pi_loss, kl = self._compute_pi_loss(log_p_old=log_p_old,\n obs_b=obs_b,\n adv_b=adv_b,\n act_b=act_b)\n\n # Early stop for high Kl\n if kl > kl_target:\n print('Max kl reached: ', kl, '[target: ', kl_target,\n '] iter: ', i)\n break\n\n self.pi_scaler.scale(pi_loss).backward()\n self.pi_scaler.step(self.pi_optimizer)\n\n self.pi_scaler.update()\n\n if 
self.args['schedule_pi_lr']:\n self.pi_scheduler.step(pi_loss)\n\n self.logger.add_scalar('PiStopIter', i, epoch)\n pi_loss = pi_loss.item()\n\n for i in range(train_args['v_train_n_iters']):\n self.v_optimizer.zero_grad()\n v_loss = self._compute_v_loss({'obs_b': obs_b, 'rew_b': rew_b})\n\n self.v_scaler.scale(v_loss).backward()\n self.v_scaler.step(self.v_optimizer)\n\n self.v_scaler.update()\n\n v_loss = v_loss.item()\n\n self.pi_losses.append(pi_loss)\n self.pi_kl.append(kl)\n self.v_losses.append(v_loss)\n\n delta_v_loss = v_loss_old - v_loss\n delta_pi_loss = pi_loss_old.item() - pi_loss\n\n self.delta_v_logs.append(delta_v_loss)\n self.delta_pi_logs.append(delta_pi_loss)\n\n self.logger.add_scalar('loss/pi', pi_loss, epoch)\n self.logger.add_scalar('loss/v', v_loss, epoch)\n\n self.logger.add_scalar('loss/Delta-Pi', delta_pi_loss, epoch)\n self.logger.add_scalar('loss/Delta-V', delta_v_loss, epoch)\n self.logger.add_scalar('Kl', kl, epoch)\n\n def predict_action(self, obs):\n \"\"\"\n Selects an action given an observation\n \"\"\"\n\n return self.actor.step(torch.from_numpy(obs).to(self.device),\n act_only=True)\n\n def load(self, path='PPO_MODEL.pt'):\n \"\"\"\n Loads trained actor network\n \"\"\"\n self.actor.load_state_dict(torch.load(path, map_location=self.device))\n print(f'Loaded model from: {path}')\n\n if not self.training:\n self.actor.eval() # sets self.train(False)\n\n def save(self, path='PPO_MODEL.pt'):\n \"\"\"\n Saves trained actor net parameters\n \"\"\"\n path = self.args['save_path']\n\n name, ext = path.rsplit('.', 1)\n path_name = f'{ name }-{self.path_time}.{ext}'\n\n torch.save(self.actor.state_dict(), path_name)\n print(f'Saved model at -> {path_name}')\n\n def run_training_loop(self):\n start_time = time.time()\n obs = self.convert_obs(self.env.reset())\n eps_len, eps_ret = 0, 0\n\n n_epochs = self.args['n_epochs']\n steps_per_epoch = self.args['steps_per_epoch']\n max_eps_len = self.args['max_eps_len']\n\n err_act_msg = [\n 'is_illegal', 'is_ambiguous', 'is_dispatching_illegal',\n 'is_illegal_reco'\n ]\n log_steps = self.args['log_step_freq']\n\n for t in range(n_epochs):\n eps_len_logs, eps_ret_log = [], []\n for step in range(steps_per_epoch):\n\n # Taking really long\n if log_steps and not step % log_steps:\n print(f'epoch: {t}, step: {step}')\n\n a, v, log_p = self.actor.step(\n torch.from_numpy(obs).type(torch.float32).to(self.device))\n act = a\n\n # log v\n self.v_logs.append(v)\n obs_n, rew, done, info = self.env.step(self.convert_act(a[0]))\n\n obs_n = self.convert_obs(obs_n)\n\n # Invalid action\n _ = [\n print(a, err_msg) for err_msg in err_act_msg\n if info[err_msg]\n ]\n\n eps_len += 1\n eps_ret += rew\n\n self.memory.store(a, obs, values=v, log_p=log_p, rew=rew)\n\n obs = obs_n\n\n terminal = done or eps_len == max_eps_len\n\n if terminal or step == steps_per_epoch - 1:\n # terminated by max episode steps\n if not done:\n last_v = self.actor.step(\n torch.from_numpy(obs).type(torch.float32).to(\n self.device))[1]\n else: # Agent terminated episode\n last_v = 0\n\n if terminal:\n # only log these for terminals\n eps_len_logs += [eps_len]\n eps_ret_log += [eps_ret]\n\n self.memory.end_eps(value=last_v)\n\n obs = self.env.reset()\n obs = self.convert_obs(obs)\n\n eps_len, eps_ret = 0, 0\n\n self._update(t + 1)\n l_t = t + 1 # log_time, start at 1\n\n # Print info for each epoch: loss_pi, loss_v, kl\n # time, v at traj collection, eps_len, epoch_no,\n # eps_ret: min, max, av\n AverageEpisodeLen = np.mean(eps_len_logs)\n\n 
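# only terminal episodes are added to eps_len_logs/eps_ret_log above, so these per-epoch aggregates skip the trailing partial episode\n 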
self.logger.add_scalar('AvEpsLen', AverageEpisodeLen, l_t)\n # MaxEpisodeLen = np.max(eps_len_logs)\n # MinEpsiodeLen = np.min(eps_len_logs)\n AverageEpsReturn = np.mean(eps_ret_log)\n try:\n MaxEpsReturn = np.max(eps_ret_log)\n MinEpsReturn = np.min(eps_ret_log)\n except ValueError:\n MaxEpsReturn = 0\n MinEpsReturn = 0\n\n self.logger.add_scalar('EpsReturn/Max', MaxEpsReturn, l_t)\n self.logger.add_scalar('EpsReturn/Min', MinEpsReturn, l_t)\n self.logger.add_scalar('EpsReturn/Average', AverageEpsReturn, l_t)\n\n # Retrieved by index, not time step ( no +1 )\n Pi_Loss = self.pi_losses[t]\n V_loss = self.v_losses[t]\n Kl = self.pi_kl[t]\n delta_v_loss = self.delta_v_logs[t]\n delta_pi_loss = self.delta_pi_logs[t]\n\n if t == 0:\n self.first_run_ret = AverageEpsReturn\n\n logs = {\n 'EpsReturn/Average': AverageEpsReturn,\n 'EpsReturn/Max': MaxEpsReturn,\n 'EpsReturn/Min': MinEpsReturn,\n 'AverageEpsLen': AverageEpisodeLen,\n 'KL': Kl,\n 'Pi_Loss': Pi_Loss,\n 'V_loss': V_loss,\n 'FirstEpochAvReturn': self.first_run_ret,\n 'Delta-V': delta_v_loss,\n 'Delta-Pi': delta_pi_loss,\n 'RunTime': time.time() - start_time\n }\n\n print('\\n', t + 1)\n print('-' * 35)\n for k, v in logs.items():\n print(k, v)\n print('\\n\\n\\n')\n\n # Save model\n final_epoch = t == n_epochs - 1\n\n if (t and not t % self.args['save_frequency']) or final_epoch:\n print('Saving model..')\n self.save()\n","sub_path":"ppo/ppo.py","file_name":"ppo.py","file_ext":"py","file_size_in_byte":18462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"559821935","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#############\n# read data #\n#############\n\napplication_test_data = pd.read_csv(\"./data/application_test.csv\")\napplication_train_data = pd.read_csv(\"./data/application_train.csv\")\n# bureau_data = pd.read_csv(\"./data/bureau.csv\")\n# bureau_balance_data = pd.read_csv(\"./data/bureau_balance.csv\")\n# credit_card_balance_data = pd.read_csv(\"./data/credit_card_balance.csv\")\nhome_credit_description = pd.read_csv(\"data/HomeCredit_columns_description.csv\", encoding=\"ISO-8859-1\")\n# installments_payment_data = pd.read_csv(\"data/installments_payments.csv\")\n# pos_cash_balance_data = pd.read_csv(\"data/POS_CASH_balance.csv\")\n# previous_application_data = pd.read_csv(\"data/previous_application.csv\")\n\n\n## sample size\ntraining_sample_size = application_train_data.shape[0]\ntest_sample_size = application_test_data.shape[0]\nfull_sample_size = training_sample_size + test_sample_size\n\n# so we have 307511 training data and 120 features in application_train.csv.\n\n\n###############################################\n# missing data in application_train/test_data #\n###############################################\n\n## check the distribution of number of missing values for each feature\n# train data\nmissing_count_each_feature_train = application_train_data.isnull().sum()\n\n# test data\nmissing_count_each_feature_test = application_test_data.isnull().sum()\n\n## find out which features don't have missing values in both train and test data. 
Are they the same?\nno_missing_features_train = missing_count_each_feature_train.index[missing_count_each_feature_train == 0]\nno_missing_features_test = missing_count_each_feature_test.index[missing_count_each_feature_test == 0]\n\nlen(no_missing_features_train)\nlen(no_missing_features_test)\n\n# there are 120 features in total.\n\nmissing_in_test_boolean = [i not in no_missing_features_test for i in no_missing_features_train]\nno_missing_features_train[missing_in_test_boolean]\n\n# all features without missing values in the training data are also complete in the test data.\n# remember that target is the output\n\n\n## check the distribution of the number of missing values for each feature\nnumber_features_with_same_missing_count = missing_count_each_feature_train.value_counts()\n\nplt.figure()\nnumber_features_with_same_missing_count.plot.bar()\nplt.xlabel(\"Number of missing observations\")\nplt.ylabel(\"Count of features\")\nplt.savefig(\"./plots/exploratory/count missing values for each features\")\n\n# the result suggests that some features are missing at the same time, i.e. not missing at random.\n# it would be nice if we could try out a more sophisticated imputation method.\n\n\n## check the distribution of the number of missing values for each person\nmissing_count_each_person_train = application_train_data.isnull().sum(axis=1)\nnumber_person_with_same_missing_count = missing_count_each_person_train.value_counts()\n\nplt.figure()\nnumber_person_with_same_missing_count.plot.bar()\nplt.xlabel(\"Number of missing features\")\nplt.ylabel(\"Count of applicants\")\nplt.savefig(\"./plots/exploratory/count missing values for each person\")\n\n# check the percentage of applicants in the training data who don't have missing values\n(missing_count_each_person_train == 0).sum() / len(missing_count_each_person_train)\n\n# most applicants in the training data have missing feature values.\n\n\n#########################\n# check class imbalance #\n#########################\n\ntarget = application_train_data[[\"TARGET\"]]\ntarget = target.iloc[:, 0]\ntarget.unique()\n(target == 0).sum() / len(target)\n\n# severe class imbalance problem.\n\n\n#####################################################\n# export the HomeCredit_columns_description to read #\n#####################################################\n\nhome_credit_description_read = home_credit_description.iloc[:, 1:].copy()\n\nwith open(\"description_table_other.txt\", \"w\") as file:\n file.write(home_credit_description_read.loc[:, home_credit_description_read.columns != \"Description\"].to_string())\n\nwith open(\"description_table_description.txt\", \"w\") as file:\n for i in np.arange(home_credit_description_read.shape[0]):\n file.write(str(i) + \". \" + home_credit_description_read.iloc[i, 2] + \"\\n\\n\")\n\n#######################################################################\n# create a dataFrame of missing data summary for the application data #\n#######################################################################\n\n# Through exploratory analysis, we found that the raw data contains a lot of missing values among the features.\n# The purpose of this section is to give us a better knowledge of the features, to decide on the missing data technique\n# we will employ.\n# The data frame will contain the following columns:\n# 1. feature names (string)\n# 2. a boolean variable indicating whether the feature is categorical or not\n# 3. a numeric variable showing the percentage of samples in application_train.csv which miss the value of the feature.\n# 4. 
three boolean variables indicating whether the feature is the subject of log_transformation,\n# abs_log_transformation and one-hot encoding. We are going to add them in the data_cleaning script.\n\n# In case you don't know what the \"all\" function in base Python does, check out the following example\na = [1, 2, 3, 1, 2, 3]\nt = (1, 2, 3)\ny = (4, 5, 6)\nall(v in t for v in a)\nall(v in y for v in a)\n\n\ndef pandas_series_missing_data(col_series, missing_count_each_feature_dt):\n # Input:\n # 1. col_series: a pandas Series which is one of the columns in HomeCredit_columns_description.csv\n # 2. missing_count_each_feature_dt: a pandas dataframe storing the number of samples missing each feature in the\n # application (train) data.\n # Return:\n # 1. a pandas series with labels \"feature_name\", \"is_categorical\" and \"percentage_missing\".\n # \"percentage_missing\" records the percentage of samples in the training data missing the value of\n # the feature.\n\n # find feature_name\n colname = col_series.name\n\n # find is_categorical\n if all(element in (0, 1) for element in col_series.unique()) or all(element in (1, 2, 3) for element in\n col_series.unique()):\n categorical = True\n elif col_series.dtype == \"O\":\n categorical = True\n # The following elif is just a cheap fix.\n elif col_series.name == \"SK_ID_CURR\":\n categorical = True\n else:\n categorical = False\n\n # find percentage_missing\n sample_size = len(col_series)\n percentage_missing = missing_count_each_feature_dt.loc[colname][0] / sample_size * 100\n\n # build pandas series\n data_dictionary = {\"feature_name\": colname, \"is_categorical\": categorical, \"percentage_missing\": percentage_missing}\n pd_series = pd.Series(data_dictionary)\n return pd_series\n\n\n# turn missing_count_each_feature_train into a dataframe by adding an index to it\nmissing_count_each_feature_train_dt = pd.DataFrame(missing_count_each_feature_train, application_train_data.columns)\nmissing_series_list = []\nfor feature in application_train_data.columns.values:\n col_series = application_train_data.loc[:, feature]\n missing_series = pandas_series_missing_data(col_series, missing_count_each_feature_train_dt)\n missing_series_list.append(missing_series)\n\nmissing_summary_data_frame = pd.DataFrame(missing_series_list)\nmissing_summary_data_frame.set_index(\"feature_name\", inplace=True)\nmissing_summary_data_frame.to_pickle(\"./data/missing_summary_data_frame\")\n\n# Now we have a data frame telling us if a feature is complete in the training data or not and if it is categorical.\n# We saved the dataframe in the \"missing_summary_data_frame\" pickle file.\n","sub_path":"Exploratory analysis.py","file_name":"Exploratory analysis.py","file_ext":"py","file_size_in_byte":7704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"438478194","text":"#!/bin/env python3\n\nfrom text_functions import *\n\n# the first ip is the main one, which has ssh access to the others\ndef install_ssh(list_ip):\n\tcount = 0\n\tfor ip in list_ip[1:]:\n\t\ttry:\n\t\t\tcommand_to_ssh(ip,\"ssh-keygen\")\n\t\t\tcount += 1\n\t\texcept:\n\t\t\tprint(\"Access from\", list_ip[0], \"to\", ip, \"not successful\")\n\tprint(count, \"ssh keys successfully generated on\", len(list_ip[1:]), \"machines\")\n\ndef get_keys_from_ip(list_ip):\n\tcount = 0\n\tfor ip in list_ip[1:]:\n\t\ttry:\n\t\t\t# create copies of authorized 
keys\n\t\t\tcopy_file_from_ssh(ip,\"/root/.ssh/*.pub\",\"./pub_K\"+ip.replace('.',''))\n\t\t\tcopy_file_from_ssh(ip,\"/root/.ssh/authorized_keys\",\"./auth_K\"+ip.replace('.',''))\n\t\t\t# append to 1 file\n\t\t\tappend_text_file(\"./pub_K\"+ip.replace('.',''), \"./pub_Ks\", 0 if count != 0 else 1)\n\t\t\tcount += 1\n\t\texcept:\n\t\t\tprint(\"Appending the ssh authorized keys wasn't successful from\", ip)\n\tprint(count, \"ssh keys successfully appended from\", len(list_ip[1:]), \"machines\")\n\ndef append_pub_to_auth_keys(list_ip):\n\tcount = 0\n\tfor ip in list_ip[1:]:\n\t\ttry:\n\t\t\t# append from 1 file to multiple auth_K files\n\t\t\tappend_text_file(\"./pub_Ks\",\"./auth_K\"+ip.replace('.',''), 0)\n\t\t\tcount += 1\n\t\texcept:\n\t\t\tprint(\"Appending to the auth_K text file wasn't successful for\", ip)\n\tprint(count, \"keys successfully appended to auth_K text files for\", len(list_ip[1:]), \"machines\")\n\ndef distribute_ssh(list_ip):\n\tcount = 0\n\tfor ip in list_ip[1:]:\n\t\ttry:\n\t\t\t# distributing to machines\n\t\t\tcopy_file_to_ssh(ip,\"./auth_K\"+ip.replace('.',''),\"/root/.ssh/authorized_keys\")\n\t\t\tcount += 1\n\t\texcept:\n\t\t\tprint(\"Distributing the ssh authorized keys wasn't successful to\", ip)\n\tprint(count, \"ssh keys successfully distributed to\", len(list_ip[1:]), \"machines\")\n\ndef main(list_ip):\n\tinstall_ssh(list_ip)\n\tget_keys_from_ip(list_ip)\n\tappend_pub_to_auth_keys(list_ip)\n\tdistribute_ssh(list_ip)\n\n\nif __name__ == \"__main__\":\n\tmain(argv[1])\n","sub_path":"copy_ssh.py","file_name":"copy_ssh.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"505964808","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom ..models.api_v1 import User_status, Company\nfrom ..serializers.api_v1 import UserStatusSerializer, CompanySerializer\n\n@api_view(['GET'])\ndef Home(request):\n api_url = {\n '/user_status': 'Get all user_statuses and Create a new user_status',\n '/user_status/': 'Get specific user_statuses',\n '/company': 'Get all companies and Create a new company',\n '/company/': 'Get specific company',\n }\n return Response(api_url)\n\n@api_view(['GET', 'POST'])\ndef accessAllUserStatuses(request):\n if request.method == 'GET':\n user_statuses = User_status.objects.all()\n serializer = UserStatusSerializer(user_statuses, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = UserStatusSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET'])\ndef getUserStatus(request, pk):\n user_statuses = User_status.objects.get(uid=pk)\n serializer = UserStatusSerializer(user_statuses, many=False)\n print(serializer.data)\n return Response(serializer.data)\n\n\n@api_view(['GET', 'POST'])\ndef accessAllCompanies(request):\n if request.method == 'GET':\n companies = Company.objects.all()\n serializer = CompanySerializer(companies, many=True)\n return Response(serializer.data)\n \n elif request.method == 'POST':\n serializer = CompanySerializer(data=request.data)\n 
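# standard DRF create flow: validate the payload, save it on success, otherwise return the field errors with HTTP 400\n 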
if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET'])\ndef getCompany(request, pk):\n if request.method == 'GET':\n companies = Company.objects.get(id=pk)\n serializer = CompanySerializer(companies, many=False)\n print(serializer.data)\n return Response(serializer.data)","sub_path":"api/views/api_v1.py","file_name":"api_v1.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"265750139","text":"from django.shortcuts import render\n\nfrom cv.models import Me\n\n\ndef index(request):\n\n\n\n user = Me.objects.all()[0]\n template_data = {\n 'user': user,\n 'contact_refs': user.contact_refs.all(),\n 'educations': user.educations.all(),\n 'languages': user.languages.all(),\n 'interests': user.interests.all(),\n 'skills': user.skills.all(),\n 'experiences': user.experiences.all(),\n 'extra_sections': user.extra_sections.all()\n }\n\n return render(request, 'indexcv.html', template_data)\n","sub_path":"cv/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"621880376","text":"import os\nimport plotBase\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nclass plot_fraction_stationtype(plotBase.plotBase):\n def doPlot(self, contest, doSave):\n fig, ax = plt.subplots(1, 1, sharex=True)\n fig.suptitle('QSO fraction - Station type', fontsize=12, fontweight='bold')\n\n #--- Plot QSOs per hour and station type\n qsos_running = contest.log[(contest.log[\"station_type\"]==\"running\") & (pd.isnull(contest.log[\"continent\"])==False)][\"hour\"].count()\n qsos_inband = contest.log[(contest.log[\"station_type\"]==\"inband\") & (pd.isnull(contest.log[\"continent\"])==False)][\"hour\"].count()\n qsos_multi = contest.log[(contest.log[\"station_type\"]==\"multi\") & (pd.isnull(contest.log[\"continent\"])==False)][\"hour\"].count()\n\n ax.pie([qsos_running, qsos_inband, qsos_multi], labels=[\"Running\", \"Inband\", \"Multiplier\"], autopct='%1.1f%%')\n ax.axis('equal')\n\n print(\"Fraction of QSOs for each station type.\")\n \n if not doSave:\n plt.show()\n else:\n fig.savefig(contest.folderToSave+\"plot_fraction__stationtype.pdf\", bbox_inches='tight')\n","sub_path":"plotLibrary/plot_fraction_stationtype.py","file_name":"plot_fraction_stationtype.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"259125213","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import with_statement\n\nimport argparse as _argparse\nimport os as _os\nimport plano as _plano\nimport shlex as _shlex\nimport subprocess as _subprocess\nimport time as _time\nimport traceback as _traceback\n\nfrom .common import *\n\n_description = \"\"\"\nBenchmark message sender, receiver, and server combinations.\n\n'quiver-bench' is one of the Quiver tools for testing the performance\nof message servers and APIs.\n\"\"\"\n\n_epilog = \"\"\"\nThe --include-* and --exclude-* arguments take comma-separated lists\nof implementation names. Use 'quiver-arrow --help' and\n'quiver-server --help' to list the available implementations.\n\"\"\"\n\nclass QuiverBenchCommand(Command):\n def __init__(self, home_dir):\n super(QuiverBenchCommand, self).__init__(home_dir)\n\n self.parser.description = _description.lstrip()\n self.parser.epilog = _epilog.lstrip()\n\n self.parser.add_argument(\"--output\", metavar=\"DIR\",\n help=\"Save output files to DIR\")\n self.parser.add_argument(\"--include-senders\", metavar=\"IMPLS\",\n help=\"Test only senders in IMPLS\",\n default=\"all\")\n self.parser.add_argument(\"--include-receivers\", metavar=\"IMPLS\",\n help=\"Test only receivers in IMPLS\",\n default=\"all\")\n self.parser.add_argument(\"--include-servers\", metavar=\"IMPLS\",\n help=\"Test only servers in IMPLS\",\n default=\"all\")\n self.parser.add_argument(\"--exclude-senders\", metavar=\"IMPLS\",\n help=\"Do not test senders in IMPLS\",\n default=\"none\")\n self.parser.add_argument(\"--exclude-receivers\", metavar=\"IMPLS\",\n help=\"Do not test receivers in IMPLS\",\n default=\"none\")\n self.parser.add_argument(\"--exclude-servers\", metavar=\"IMPLS\",\n help=\"Do not test servers in IMPLS\",\n default=\"builtin\")\n self.parser.add_argument(\"--client-server\", action=\"store_true\",\n help=\"Test only client-server mode\")\n self.parser.add_argument(\"--peer-to-peer\", action=\"store_true\",\n help=\"Test only peer-to-peer mode\")\n self.parser.add_argument(\"--matching-pairs\", action=\"store_true\",\n help=\"Test only matching senders and receivers\")\n\n self.add_common_test_arguments()\n self.add_common_tool_arguments()\n\n def init(self):\n super(QuiverBenchCommand, self).init()\n\n self.output_dir = self.args.output\n\n if self.output_dir is None:\n prefix = _plano.program_name()\n datestamp = _time.strftime('%Y-%m-%d', _time.localtime())\n\n self.output_dir = \"{}-{}\".format(prefix, datestamp)\n\n _plano.remove(self.output_dir)\n _plano.make_dir(self.output_dir)\n\n self.client_server = True\n self.peer_to_peer = True\n\n if self.args.client_server:\n self.peer_to_peer = False\n\n if self.args.peer_to_peer:\n self.client_server = False\n\n self.matching_pairs = self.args.matching_pairs\n\n self.init_impl_attributes()\n self.init_common_test_attributes()\n self.init_common_tool_attributes()\n\n self.failures = list()\n\n def init_impl_attributes(self):\n sender_impls = set(ARROW_IMPLS)\n receiver_impls = set(ARROW_IMPLS)\n server_impls = set(SERVER_IMPLS)\n\n if self.args.include_senders != \"all\":\n sender_impls = self.parse_arrow_impls(self.args.include_senders)\n\n if self.args.include_receivers != \"all\":\n receiver_impls = self.parse_arrow_impls(self.args.include_receivers)\n\n if 
self.args.include_servers != \"all\":\n server_impls = self.parse_server_impls(self.args.include_servers)\n\n if self.args.exclude_senders != \"none\":\n sender_impls -= self.parse_arrow_impls(self.args.exclude_senders)\n\n if self.args.exclude_receivers != \"none\":\n receiver_impls -= self.parse_arrow_impls(self.args.exclude_receivers)\n\n if self.args.exclude_servers != \"none\":\n server_impls -= self.parse_server_impls(self.args.exclude_servers)\n\n for impl in list(sender_impls):\n file = self.get_arrow_impl_file(impl)\n\n if not _plano.exists(file):\n _plano.warn(\"No implementation at '{}'; skipping it\", file)\n sender_impls.remove(impl)\n\n for impl in list(receiver_impls):\n file = self.get_arrow_impl_file(impl)\n\n if not _plano.exists(file):\n _plano.warn(\"No implementation at '{}'; skipping it\", file)\n receiver_impls.remove(impl)\n\n for impl in list(server_impls):\n file = self.get_server_impl_file(impl)\n\n if not _plano.exists(file):\n _plano.warn(\"No implementation at '{}'; skipping it\", file)\n server_impls.remove(impl)\n\n self.sender_impls = sorted(sender_impls)\n self.receiver_impls = sorted(receiver_impls)\n self.server_impls = sorted(server_impls)\n\n def parse_arrow_impls(self, value):\n impls = set()\n\n for name in value.split(\",\"):\n impls.add(self.get_arrow_impl_name(name, name))\n\n return impls\n\n def parse_server_impls(self, value):\n impls = set()\n\n for name in value.split(\",\"):\n impls.add(self.get_server_impl_name(name, name))\n\n return impls\n\n def run(self):\n if self.client_server:\n for sender_impl in self.sender_impls:\n for receiver_impl in self.receiver_impls:\n if self.matching_pairs:\n if sender_impl != receiver_impl:\n continue\n\n for server_impl in self.server_impls:\n if \"activemq-artemis-jms\" in (sender_impl, receiver_impl):\n if server_impl != \"activemq-artemis\":\n continue\n\n if \"activemq-jms\" in (sender_impl, receiver_impl):\n if server_impl not in (\"activemq\", \"activemq-artemis\"):\n continue\n\n self.run_test(sender_impl, receiver_impl, server_impl)\n\n if self.peer_to_peer:\n for sender_impl in self.sender_impls:\n if sender_impl in (\"activemq-jms\", \"activemq-artemis-jms\"):\n continue\n\n for receiver_impl in self.receiver_impls:\n if self.matching_pairs:\n if sender_impl != receiver_impl:\n continue\n\n if receiver_impl not in PEER_TO_PEER_ARROW_IMPLS:\n continue\n\n self.run_test(sender_impl, receiver_impl, None)\n\n print(\"Test failures: {}\".format(len(self.failures)))\n\n for failure in self.failures:\n print(failure) # Need summary\n\n if len(self.failures) > 0:\n _plano.exit(1)\n\n def run_test(self, sender_impl, receiver_impl, server_impl):\n if server_impl is None:\n summary = \"{} -> {} \".format(sender_impl, receiver_impl)\n test_dir = _plano.join(self.output_dir, sender_impl, receiver_impl, \"peer-to-peer\")\n else:\n summary = \"{} -> {} -> {} \".format(sender_impl, server_impl, receiver_impl)\n test_dir = _plano.join(self.output_dir, sender_impl, receiver_impl, server_impl)\n\n print(\"{:.<113} \".format(summary), end=\"\")\n\n _plano.flush()\n _plano.make_dir(test_dir)\n\n test_data_dir = _plano.join(test_dir, \"data\")\n test_output_file = _plano.join(test_dir, \"output.txt\")\n test_status_file = _plano.join(test_dir, \"status.txt\")\n\n test_command = [\n \"quiver\", \"//127.0.0.1:56720/q0\",\n \"--sender\", sender_impl,\n \"--receiver\", receiver_impl,\n \"--output\", test_data_dir,\n \"--messages\", self.args.messages,\n \"--body-size\", self.args.body_size,\n \"--credit\", 
self.args.credit,\n \"--timeout\", self.args.timeout,\n ]\n\n if server_impl is None:\n test_command.append(\"--peer-to-peer\")\n\n test_command = \" \".join(test_command)\n\n server = None\n server_output_file = _plano.join(test_dir, \"server-output.txt\")\n\n if server_impl is not None:\n server_ready_file = _plano.make_temp_file()\n\n server_command = [\n \"quiver-server\", \"//127.0.0.1:56720/q0\",\n \"--impl\", server_impl,\n \"--ready-file\", server_ready_file,\n \"--verbose\",\n ]\n\n server_command = \" \".join(server_command)\n\n with open(server_output_file, \"w\") as sf:\n with open(test_output_file, \"w\") as tf:\n try:\n if server_impl is not None:\n server = _plano.start_process(server_command, stdout=sf, stderr=sf)\n\n for i in range(30):\n if _plano.read(server_ready_file) == \"ready\\n\":\n break\n\n _plano.sleep(1)\n else:\n raise _Timeout(\"Timed out waiting for server to be ready\")\n\n _plano.call(test_command, stdout=tf, stderr=tf)\n\n _plano.write(test_status_file, \"PASSED\")\n\n print(\"PASSED\")\n except KeyboardInterrupt:\n raise\n except (_plano.CalledProcessError, _Timeout) as e:\n self.failures.append(str(e)) # XXX capture the combo\n\n _plano.write(test_status_file, \"FAILED: {}\".format(str(e)))\n\n print(\"FAILED\")\n\n if self.verbose:\n # XXX Record the result in this format\n\n print(\"--- Error message ---\")\n print(\"> {}\".format(str(e)))\n print(\"--- Test command ---\")\n print(\"> {}\".format(test_command))\n print(\"--- Test output ---\")\n\n for line in _plano.read_lines(test_output_file):\n print(\"> {}\".format(line), end=\"\")\n\n if server_impl is not None:\n print(\"--- Server command ---\")\n print(\"> {}\".format(server_command))\n print(\"--- Server output ---\")\n\n for line in _plano.read_lines(server_output_file):\n print(\"> {}\".format(line), end=\"\")\n except:\n _traceback.print_exc()\n finally:\n _plano.flush()\n\n if server is not None:\n _plano.stop_process(server)\n\nclass _Timeout(Exception):\n pass\n","sub_path":"python/quiver/bench.py","file_name":"bench.py","file_ext":"py","file_size_in_byte":12186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"226005259","text":"from pyy.databasees.sqllites.pactice.subClass import Department\n# query the Department table after inserting its data\n\ndbname = 'mall.db'\ncsvfile = 'departments.csv'\ndept = Department(dbname,csvfile)\n\ndept.insert()\nmycursor = dept.getAllData()\nfor dno, name, locations, tel in mycursor:\n print(f'Department no: {dno}, '\n f'Name: {name}, '\n f'Location: {locations}, '\n f'Phone: {tel}')\nprint('finished')","sub_path":"databasees/sqllites/pactice/dbTest02.py","file_name":"dbTest02.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"357257608","text":"# -*- coding: utf-8 -*-\nimport json\nimport argparse\nfrom .retriever import Queryer\n\n\nDATA_DIR = \"./data/Reddit\"\nINDEX_DIR = \"./index\"\n\n\ndef data_gen(top_k=3):\n train_post, train_res = [], []\n valid_post, valid_res = [], []\n test_post, test_res, test_entities = [], [], []\n\n with open('%s/trainset.txt' % DATA_DIR) as f:\n for idx, line in enumerate(f):\n text_line = json.loads(line)\n post = \" \".join(text_line['post'])\n response = \" \".join(text_line['response'])\n train_post.append(post)\n train_res.append(response)\n if idx % 100000 == 0 and idx > 0:\n print('read train file line %d' % idx)\n\n with open('%s/validset.txt' % DATA_DIR) as f:\n for line in f:\n text_line = 
json.loads(line)\n post = \" \".join(text_line['post'])\n response = \" \".join(text_line['response'])\n valid_post.append(post)\n valid_res.append(response)\n\n with open('%s/testset.txt' % DATA_DIR) as f:\n for line in f:\n text_line = json.loads(line)\n post = \" \".join(text_line['post'])\n response = \" \".join(text_line['response'])\n test_post.append(post)\n test_res.append(response)\n test_entities.append(text_line['all_entities'])\n\n with open(\"%s/id2response.json\" % INDEX_DIR, 'r') as file:\n id2response = json.load(file)\n\n cnt = 0\n queryer = Queryer(INDEX_DIR, top_k=top_k)\n\n with open('%s/train.txt' % DATA_DIR, 'w') as fw:\n for post, res in zip(train_post, train_res):\n # search corresponding responses of top-k posts\n query = _validate(post)\n result = queryer.run_query(query)\n result_ids = result['ids']\n response_k = []\n for idx in result_ids:\n sent = id2response[idx]\n response_k.append(sent)\n train = {'post': post,\n 'response': res,\n 'corr_responses': response_k}\n json_str = json.dumps(train)\n fw.write(json_str + '\\n')\n cnt += 1\n if cnt % 10000 == 0:\n print(\"%d train done\" % cnt)\n cnt = 0\n with open('%s/valid.txt' % DATA_DIR, 'w') as fw:\n for post, res in zip(valid_post, valid_res):\n # search corresponding responses of top-k posts\n query = _validate(post)\n result = queryer.run_query(query)\n result_ids = result['ids']\n response_k = []\n for idx in result_ids:\n sent = id2response[idx]\n response_k.append(sent)\n valid = {'post': post,\n 'response': res,\n 'corr_responses': response_k}\n json_str = json.dumps(valid)\n fw.write(json_str + '\\n')\n cnt += 1\n if cnt % 1000 == 0:\n print(\"%d valid done\" % cnt)\n\n entity_lists = []\n with open('%s/csk_entity.txt' % DATA_DIR) as f:\n for i, line in enumerate(f):\n e = line.strip()\n entity_lists.append(e)\n\n cnt = 0\n with open('%s/test.txt' % DATA_DIR, 'w') as fw:\n for post, res, all_entity in zip(test_post, test_res, test_entities):\n # search corresponding responses of top-k posts\n query = _validate(post)\n result = queryer.run_query(query)\n result_ids = result['ids']\n response_k = []\n for idx in result_ids:\n sent = id2response[idx]\n response_k.append(sent)\n\n ent_indexs = []\n for ent_list in all_entity:\n for idx in ent_list:\n ent_indexs.append(idx)\n entities = [entity_lists[idx] for idx in ent_indexs]\n\n test = {'post': post,\n 'response': res,\n 'corr_responses': response_k,\n 'entities': entities}\n json_str = json.dumps(test)\n fw.write(json_str + '\\n')\n cnt += 1\n if cnt % 1000 == 0:\n print(\"%d test done\" % cnt)\n\n\ndef _validate(query):\n valid_query = str(query).strip()\n remove_str = ['*', '?', '!', ':', '-', '(', ')', '[', ']', '{', '}']\n for s in remove_str:\n if s in valid_query:\n valid_query = valid_query.replace(s, '')\n\n return valid_query\n\n\ndef main(args):\n data_gen(top_k=args.top_k)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Generate top-k similar responses for dataset\")\n parser.add_argument('--top_k', type=int, default=3, help='top-k')\n parsed_args = parser.parse_args()\n\n main(parsed_args)\n ","sub_path":"src/utils/response_candgen.py","file_name":"response_candgen.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"217244032","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 23 14:21:39 
2018\r\n\r\n\r\nAssignment 10:\r\n1. Get the IDs of the 2300 schools\r\n2. Get the IDs of the 31 cities\r\n3. Fetch the 142,000 records; 31/10, each group holds three cities' data, assembled together afterwards\r\n4. Aggregate the 142,600 records with Spark\r\n\r\n@author: Robbin\r\n\"\"\"\r\n\r\n### 1. Get the IDs of the 2300 schools\r\nimport re\r\nls = open(\"all_school.txt\",encoding=\"utf-8\").readlines()\r\nschool_ls=[]\r\n\r\nfor line in ls:\r\n # non-capturing alternation: match names ending in 大学 (university) or 学院 (college)\r\n r = re.findall('.*(?:大学|学院)',line)\r\n num = line.split(\"-jianjie-\")[1].split(\".\")[0]\r\n print(''.join(r) +' '+str(num))\r\n\r\n\r\n### 2. Get the IDs of the 31 cities\r\nlc = open(\"XML高考派城市.txt\",encoding=\"gbk\").readlines()\r\nfor line in lc[1:-1]:\r\n nu = line.split(\",\")[1].split(\")\")[0]\r\n cn = line.split(\"\\\">\")[1].split(\"<\")[0]\r\n print(str(cn)+' '+str(nu))\r\n\r\n\r\n\r\n### 3. Fetch the 142,000 records; 31/10, each group holds three cities' data, assembled together afterwards\r\nimport urllib.request as r\r\nls = open(\"all_school.txt\",encoding=\"utf-8\").readlines()\r\nschool_ls=[]\r\n\r\nfor line in ls:\r\n school_ls.append(line.split(\"-jianjie-\")[1].split(\".\")[0]) \r\n\r\nurl=\"http://www.gaokaopai.com/university-ajaxGetMajor.html\"\r\nheaders={\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3493.3 Safari/537.36',\r\n 'X-Requested-With': 'XMLHttpRequest'\r\n }\r\nf=open('北京文科人数.txt','a',encoding='utf-8')\r\nfor schoolid in school_ls:\r\n req=r.Request(url,data='id={}&type=2&city=11&state=1'.format(schoolid).encode(),headers=headers)\r\n # errors='ignore' skips any undecodable bytes in the response\r\n d = r.urlopen(req).read().decode('utf-8','ignore')\r\n if d.startswith('{'):\r\n f.write(d+'\\n')\r\n print(\"Writing data for school \"+schoolid)\r\n else:\r\n print(\"Bad response for school \"+schoolid) \r\nf.close()\r\n\r\n\r\nimport json\r\n\r\nf1 = open(\"学校名称.txt\",'w',encoding=\"utf-8\")\r\nf2 = open(\"人数.txt\",'w',encoding=\"utf-8\")\r\nls = open(\"北京文科人数.txt\",encoding=\"utf-8\").readlines()\r\nfor line in ls:\r\n r=json.loads(line)\r\n if r[\"status\"]==1:\r\n vs = r[\"data\"]\r\n xn = vs[0][\"school\"]\r\n total = 0  # head-count total for this school only\r\n for line1 in vs:\r\n total = total +int(line1[\"plan\"])\r\n f1.write(xn+\"\\n\")\r\n f2.write(str(total)+\"\\n\")\r\n\r\nf1.close()\r\nf2.close()\r\n","sub_path":"4-1temp.py","file_name":"4-1temp.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"340732207","text":"\"\"\"Facial Emotion Detector\nprerequisites - Keras, numpy\nBy: Suprotik Dey\nSpecial Thanks to: Sethu Iyer\n\"\"\"\n\n#import\nimport keras\nfrom keras.preprocessing.image import load_img,img_to_array\nimport numpy as np\nfrom keras.models import load_model\nfrom keras.applications import VGG16,imagenet_utils\nfrom keras.models import Model\n\ndef convert_img_to_vector(img_path):\n image = load_img(img_path,target_size=(224,224))\n image = img_to_array(image)\n image = np.expand_dims(image,axis=0)\n image = preprocess(image)\n return image\n\n\ndef get_image_feature(img_path):\n feats = np.transpose(new_model.predict(convert_img_to_vector(img_path)))\n return feats\n\ndef predict_mood(img_path, showPercent=True):\n decode_dict={0: 'Angry', 1: 'Disgusted', 2: 'Happy', 3:'Sad', 4:'Scared',5:'Shocked'}\n feats = get_image_feature(img_path)\n feats = feats.reshape(-1,4096)\n probab = model.predict_proba(feats,verbose=0)\n # argpartition(-2) leaves the two largest probabilities in the last two slots; reversed -> [top, runner-up]\n top_2 = probab[0].argpartition(-2)[-2:][::-1]\n percent_high = np.around(100*probab[0][top_2[0]],decimals=2)\n percent_secondhigh = np.around(100*probab[0][top_2[1]],decimals=2)\n if (showPercent):\n print('The person in the image is '+str(percent_high)+' % '+decode_dict[top_2[0]] +' and '+str(percent_secondhigh)+' % 
'+decode_dict[top_2[1]])\n return (str(percent_high), str(percent_secondhigh))\n\ndef init():\n global preprocess, model, new_model\n preprocess = imagenet_utils.preprocess_input\n model = VGG16(weights=\"imagenet\")\n new_model = Model(inputs=model.input,outputs=model.layers[21].output)\n model = load_model('./face/face_emotion/trained_model.h5')\n\n\n","sub_path":"face/face_emotion/emotion_detect.py","file_name":"emotion_detect.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"337717581","text":"import sys\r\n\r\ndef calculate(levels):\r\n stars = steps = 0\r\n done = {}\r\n\r\n while stars < 2 * len(levels):\r\n best = None\r\n for i, (star1, star2) in enumerate(levels):\r\n if i not in done:\r\n if star2 <= stars:\r\n best = (i, 2, 2)\r\n break\r\n elif star1 <= stars:\r\n if best is None or (best[1] == 1 and levels[best[0]][1] < star2):\r\n best = (i, 1, 1)\r\n elif done[i] == 1:\r\n if star2 <= stars:\r\n best = (i, 2, 1)\r\n\r\n if best is None:\r\n return None\r\n\r\n stars += best[2]\r\n steps += 1\r\n done[best[0]] = best[1]\r\n\r\n return steps\r\n\r\nwith open(sys.argv[1]) as fin:\r\n with open('output', 'w') as fout:\r\n N = levels = None\r\n for i, line in enumerate(fin):\r\n if i > 0:\r\n if i % 2 == 1:\r\n N = int(line)\r\n else:\r\n assert N is not None\r\n\r\n levels = [map(int, line.split())]\r\n for j in xrange(N - 1):\r\n levels.append(map(int, next(fin).split()))\r\n\r\n value = calculate(levels)\r\n\r\n fout.write('Case #%d: ' % (i / 2,))\r\n fout.write(('%d\\n' % value) if value is not None else 'Too Bad\\n')\r\n\r\n N = levels = None\r\n","sub_path":"solutions_1482494_1/Python/junwhan/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"132371223","text":"\ndef calc_hour(hour1, hour2):\n time_format = {\n '1' : 1,\n '2' : 2,\n '3' : 3,\n '4' : 4,\n '5' : 5,\n '6' : 6,\n '7' : 7,\n '8' : 8,\n '9' : 9,\n '10' : 10,\n '11' : 11,\n '12' : 12,\n '13' : 1,\n '14' : 2,\n '15' : 3,\n '16' : 4,\n '17' : 5,\n '18' : 6,\n '19' : 7,\n '20' : 8,\n '21' : 9,\n '22' : 10,\n '23' : 11,\n '24' : 12,\n }\n if hour1 + hour2 > 24:\n diff = hour2 % 24\n if diff:\n hour2 = diff\n else:\n hour2 = 12\n\n result = time_format[str(hour1 + hour2)]\n\n return result\n\ndef calc_min(min1, min2, hour_result):\n result = min1 + min2\n if result > 60:\n add = result // 60\n hour_result += add\n result -= 60\n if len(str(result)) == 1:\n result = '0' + str(result)\n \n return hour_result, result\n\n\ndef add_time(start, duration, day = 'Funday'):\n\n time, end = start.split()\n hour1, min1 = map(int, time.split(':'))\n hour2, min2 = map(int, duration.split(':'))\n\n hour_result = calc_hour(hour1, hour2)\n hour_result, min_result = calc_min(min1, min2, hour_result)\n end_result = end\n \n if (hour1 + (hour2 % 24)) >= 12 or hour_result >= 12:\n if end == 'AM': end_result = 'PM'\n elif end == 'PM': end_result = 'AM'\n \n result = str(hour_result) + ':' + str(min_result) + ' ' + end_result\n\n if day != 'Funday':\n days = [0, 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n day = day.capitalize()\n if hour2 >= 24:\n days_later = round((hour1+hour2) / 24)\n if min1 + min2 > 60:\n days_later += 1\n index = days.index(day)\n day_result = days_later + index\n if day_result == 7:\n day_result = days[day_result]\n else:\n day_result = days[day_result % 7]\n 
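# append the resolved weekday name to the formatted time\r\n 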
result += f', {day_result}'\r\n \r\n else:\r\n result += f', {day}' \r\n\r\n\r\n if hour2 >= 24:\r\n days_later = round((hour1+hour2) / 24)\r\n if min1 + min2 > 60:\r\n days_later += 1\r\n if days_later == 1:\r\n result += ' (next day)'\r\n else:\r\n result += f' ({days_later} days later)'\r\n elif (end == 'PM' and end_result == 'AM'):\r\n result += ' (next day)'\r\n # elif end == end_result and hour2 >= 24:\r\n #     result += ' (next day)'\r\n\r\n return result\r\n\r\n\r\n\r\nprint(add_time(\"8:16 PM\", \"466:02\", \"tuesday\"))\r\n\r\n\r\n\r\n","sub_path":"time_calculator.py","file_name":"time_calculator.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"609092023","text":"import busio\nimport digitalio\nimport board\nimport adafruit_mcp3xxx.mcp3008 as MCP\nimport RPi.GPIO as GPIO\nimport time\nfrom publish import Device\nfrom adafruit_mcp3xxx.analog_in import AnalogIn\n\n\nclass LightSensor():\n def __init__(self, i, stop):\n self.device = Device()\n spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)\n cs = digitalio.DigitalInOut(board.D5)\n # Create an MCP3008 object\n mcp = MCP.MCP3008(spi, cs)\n # Create an analog input channel on MCP3008 pin i; getattr looks up the\n # pin constant (MCP.P0..MCP.P7) by name, avoiding eval\n self.channel = AnalogIn(mcp, getattr(MCP, 'P' + str(i)))\n self.stop = stop\n self.taken = 0\n\n def detect(self):\n return 'ADC Voltage: ' + str(self.channel.voltage) + 'V'\n \n def hasCar(self):\n self.device.publish('openchirp/device/'+self.device.username + '/light' + str(self.stop), payload=self.channel.voltage, qos=0, retain=True )\n if self.channel.voltage < 2:\n self.taken = 0\n return 0\n else:\n self.taken = 1\n return 1\n\nif __name__ == \"__main__\":\n lg1 = LightSensor(0, 17)\n lg2 = LightSensor(1, 26)\n lg3 = LightSensor(2, 31)\n lg4 = LightSensor(3, 3)\n while True:\n lg1.hasCar()\n lg2.hasCar()\n lg3.hasCar()\n lg4.hasCar()\n","sub_path":"Server/light_cp.py","file_name":"light_cp.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"93524171","text":"import socket\nimport select\nimport sys\n\nHOST = '127.0.0.1'\nPORT = 65432\nname = input('enter your name: ')\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n try:\n s.connect((HOST, PORT))\n print('Connected successfully!')\n s.send(bytes(name, 'utf-8'))\n while True:\n sockets_list = [sys.stdin, s]\n read_sockets, write_socket, error_socket = select.select(sockets_list, [], [])\n for socks in read_sockets:\n if socks == s:\n message = socks.recv(1024).decode()\n print(message)\n else:\n message = input()\n s.send(bytes(name + ': ' + message, 'utf8'))\n except Exception as e:\n print(e)","sub_path":"socket/client2.py","file_name":"client2.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"36013070","text":"#!/usr/bin/python\n# Copyright: (c) 2019, DellEMC\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils import dellemc_ansible_utils as utils\nimport logging\n\n__metaclass__ = type\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n }\n\nDOCUMENTATION = r'''\n---\nmodule: dellemc_powermax_srdf\nversion_added: '2.6'\nshort_description: Manage SRDF pair on PowerMax/VMAX Storage\n System\ndescription:\n- Managing SRDF links on a PowerMax storage system includes creating an SRDF pair for\n a storage group, modifying the SRDF mode, modifying the SRDF 
state of an existing\n SRDF pair, and deleting an SRDF pair. All create and modify calls are asynchronous\n by default.\nextends_documentation_fragment:\n - dellemc.dellemc_powermax\nauthor:\n- Manisha Agrawal (manisha.agrawal@dell.com)\n\noptions:\n sg_name:\n description:\n - Name of Storage Group. SRDF pairings are managed at a storage group level.\n - Required to identify the SRDF link.\n type: string\n default: None\n serial_no:\n description:\n - The serial number will refer to the source (R1) PowerMax/VMAX array when\n protecting a storage group. However, srdf_state operations may be issued\n from either the R1 or the R2 array.\n type: string\n default: None\n remote_serial_no:\n description:\n - The 12-digit serial number of the remote PowerMax or VMAX array (R2).\n - Required while creating an SRDF link.\n type: string\n default: None\n rdfg_no:\n description:\n - The RDF group number.\n - Optional parameter for each call. For create, if specified, the array\n will reuse the RDF group, otherwise return an error. For modify and delete\n operations, if the RDF group number is not specified, and the storage\n group is protected by multiple RDF groups, then an error will be raised.\n type: number\n default: None\n state:\n description:\n - Define whether the SRDF pairing should exist or not.\n - present indicates that the SRDF pairing should exist in system.\n - absent indicates that the SRDF pairing should not exist in system.\n required: true\n choices: [absent, present]\n srdf_mode:\n description:\n - The replication mode of the SRDF pair.\n - Required when creating an SRDF pair.\n - Can be modified by providing the required value.\n choices: [Active, Adaptive Copy, Synchronous, Asynchronous]\n type: string\n default: None\n srdf_state:\n description:\n - Desired state of the SRDF pairing. While creating a new SRDF pair, allowed\n values are 'Establish' and 'Suspend'. If state is not specified, the pair\n will be created in 'Suspended' state. When modifying the state, only\n certain changes are allowed.\n choices: [Establish, Resume, Restore, Suspend, Swap, Split, Failback,\n Failover, Setbias]\n new_rdf_group:\n description:\n - Overrides the SRDF Group selection functionality and forces the creation\n of a new SRDF Group.\n default: false\n type: bool\n wait_for_completion:\n description:\n - Flag to indicate if the operation should be run synchronously or\n asynchronously. True signifies synchronous execution. By default, all\n create and update operations will be run asynchronously.\n default: False\n type: bool\n job_id:\n description:\n - Job ID of an asynchronous task. Can be used to get details of a job.\n default: None\n type: str\n witness:\n description:\n - Flag to specify use of Witness for a Metro configuration. Setting to True\n signifies to use Witness, setting it to False signifies to use Bias. 
It\n is recommended to configure a witness for SRDF Metro in a production\n environment; this is configured via the Unisphere for PowerMax UI or REST.\n - The flag can be set only for modifying srdf_state to either Establish,\n Suspend or Restore.\n - While creating a Metro configuration, the witness flag must be set to True.\n default: None\n type: bool\n '''\n \n\nEXAMPLES = r'''\n- name: Create and establish storagegroup SRDF/a pairing\n  register: Job_details_body\n  dellemc_powermax_srdf:\n unispherehost: \"{{unispherehost}}\"\n universion: \"{{universion}}\"\n verifycert: \"{{verifycert}}\"\n user: \"{{user}}\"\n password: \"{{password}}\"\n serial_no: \"{{serial_no}}\"\n sg_name: \"{{sg_name}}\"\n remote_serial_no: \"{{remote_serial_no}}\"\n srdf_mode: 'Asynchronous'\n srdf_state: 'Establish'\n state: 'present'\n\n - name: Create storagegroup SRDF/s pair in default suspended mode as a\n synchronous task\n   dellemc_powermax_srdf:\n unispherehost: \"{{unispherehost}}\"\n universion: \"{{universion}}\"\n verifycert: \"{{verifycert}}\"\n user: \"{{user}}\"\n password: \"{{password}}\"\n serial_no: \"{{serial_no}}\"\n sg_name: \"{{sg_name2}}\"\n remote_serial_no: \"{{remote_serial_no}}\"\n state: 'present'\n srdf_mode: 'Synchronous'\n wait_for_completion: True\n\n - name: Create storagegroup Metro SRDF pair with Witness for resiliency\n   dellemc_powermax_srdf:\n unispherehost: \"{{unispherehost}}\"\n universion: \"{{universion}}\"\n verifycert: \"{{verifycert}}\"\n user: \"{{user}}\"\n password: \"{{password}}\"\n serial_no: \"{{serial_no}}\"\n sg_name: \"{{sg_name}}\"\n remote_serial_no: \"{{remote_serial_no}}\"\n state: 'present'\n srdf_mode: 'Active'\n wait_for_completion: True\n srdf_state: 'Establish'\n\n - name: Suspend storagegroup Metro SRDF pair\n   dellemc_powermax_srdf:\n unispherehost: \"{{unispherehost}}\"\n universion: \"{{universion}}\"\n verifycert: \"{{verifycert}}\"\n user: \"{{user}}\"\n password: \"{{password}}\"\n serial_no: \"{{serial_no}}\"\n sg_name: \"{{sg_name}}\"\n remote_serial_no: \"{{remote_serial_no}}\"\n state: 'present'\n srdf_state: 'Suspend'\n\n - name: Establish link for storagegroup Metro SRDF pair and use Bias for\n resiliency\n   dellemc_powermax_srdf:\n unispherehost: \"{{unispherehost}}\"\n universion: \"{{universion}}\"\n verifycert: \"{{verifycert}}\"\n user: \"{{user}}\"\n password: \"{{password}}\"\n serial_no: \"{{serial_no}}\"\n sg_name: \"{{sg_name}}\"\n remote_serial_no: \"{{remote_serial_no}}\"\n state: 'present'\n wait_for_completion: False\n srdf_state: 'Establish'\n witness: False\n\n - name: Get SRDF details\n   dellemc_powermax_srdf:\n unispherehost: \"{{unispherehost}}\"\n universion: \"{{universion}}\"\n verifycert: \"{{verifycert}}\"\n user: \"{{user}}\"\n password: \"{{password}}\"\n serial_no: \"{{serial_no}}\"\n sg_name: \"{{sg_name}}\"\n state: 'present'\n\n - name: Modify SRDF mode\n   dellemc_powermax_srdf:\n unispherehost: \"{{unispherehost}}\"\n universion: \"{{universion}}\"\n verifycert: \"{{verifycert}}\"\n user: \"{{user}}\"\n password: \"{{password}}\"\n serial_no: \"{{serial_no}}\"\n sg_name: \"{{sg_name}}\"\n srdf_mode: 'Synchronous'\n state: 'present'\n\n - name: Failover SRDF link\n   dellemc_powermax_srdf:\n unispherehost: \"{{unispherehost}}\"\n universion: \"{{universion}}\"\n verifycert: \"{{verifycert}}\"\n user: \"{{user}}\"\n password: \"{{password}}\"\n serial_no: \"{{serial_no}}\"\n sg_name: \"{{sg_name}}\"\n srdf_state: 'Failover'\n state: 'present'\n\n - name: Get SRDF Job status\n   dellemc_powermax_srdf:\n unispherehost: 
\"{{unispherehost}}\"\n universion: \"{{universion}}\"\n verifycert: \"{{verifycert}}\"\n user: \"{{user}}\"\n password: \"{{password}}\"\n serial_no: \"{{serial_no}}\"\n job_id: \"{{Job_details_body.Job_details.jobId}}\"\n state: 'present'\n\n - name: Establish SRDF link\n dellemc_powermax_srdf:\n unispherehost: \"{{unispherehost}}\"\n universion: \"{{universion}}\"\n verifycert: \"{{verifycert}}\"\n user: \"{{user}}\"\n password: \"{{password}}\"\n serial_no: \"{{serial_no}}\"\n sg_name: \"{{sg_name2}}\"\n srdf_state: 'Establish'\n state: 'present'\n\n - name: Suspend SRDF link\n dellemc_powermax_srdf:\n unispherehost: \"{{unispherehost}}\"\n universion: \"{{universion}}\"\n verifycert: \"{{verifycert}}\"\n user: \"{{user}}\"\n password: \"{{password}}\"\n serial_no: \"{{serial_no}}\"\n sg_name: \"{{sg_name2}}\"\n srdf_state: 'Suspend'\n state: 'present'\n\n - name: Delete SRDF link\n dellemc_powermax_srdf:\n unispherehost: \"{{unispherehost}}\"\n universion: \"{{universion}}\"\n verifycert: \"{{verifycert}}\"\n user: \"{{user}}\"\n password: \"{{password}}\"\n serial_no: \"{{serial_no}}\"\n sg_name: \"{{sg_name}}\"\n state: 'absent'\n'''\n\nRETURN = r'''\nchanged: [localhost] => {\n \"Job_details\": {\n \"completed_date_milliseconds\": 0,\n \"jobId\": \"1570622921504\",\n \"last_modified_date\": \"Oct-09-2019 08:08:41.505\",\n \"last_modified_date_milliseconds\": 1570622921505,\n \"name\": \"Protect Storage Group - SRDF Ansible_Test_SRDF2\",\n \"resourceLink\": \"https://xxx:8443/univmax/restapi/90/replication\n /symmetrix/xx/storagegroup/x/rdf_group/x\",\n \"result\": \"Started job execution on Wed 9 Oct 2019 08:08:43 EDT\",\n \"status\": \"RUNNING\",\n \"task\": [\n {\n \"description\": \"SRDF protect Storage Group Ansible_Test_SRDF2\n to remote array xx, mode = Synchronous, establish = false,\n remote Storage Group = Ansible_Test_SRDF2\",\n \"execution_order\": 1\n }\n ],\n \"username\": \"C:xxx\\\\********\"\n },\n \"SRDF_link_details\": {\n \"hop2Modes\": [],\n \"hop2Rdfgs\": [],\n \"hop2States\": [],\n \"largerRdfSides\": [\n \"Equal\"\n ],\n \"localR1InvalidTracksHop1\": 0,\n \"localR2InvalidTracksHop1\": 0,\n \"modes\": [\n \"Asynchronous\"\n ],\n \"rdfGroupNumber\": 25,\n \"remoteR1InvalidTracksHop1\": 0,\n \"remoteR2InvalidTracksHop1\": 0,\n \"states\": [\n \"Consistent\"\n ],\n \"storageGroupName\": \"Ansible_Test_SRDF\",\n \"symmetrixId\": \"xxx\",\n \"totalTracks\": 8205,\n \"volumeRdfTypes\": [\n \"R1\"\n ]\n },\n \"changed\": true,\n \"invocation\": {\n \"module_args\": {\n \"wait_for_completion\": true,\n \"new_rdf_group\": false,\n \"job_id\": null,\n \"password\": \"VALUE_SPECIFIED_IN_NO_LOG_PARAMETER\",\n \"rdfg_no\": null,\n \"remote_serial_no\": \"xx\",\n \"serial_no\": \"xx\",\n \"sg_name\": \"Ansible_Test_SRDF\",\n \"srdf_mode\": \"Asynchronous\",\n \"srdf_state\": \"Establish\",\n \"state\": \"present\",\n \"unispherehost\": \"xx\",\n \"universion\": 90,\n \"user\": \"VALUE_SPECIFIED_IN_NO_LOG_PARAMETER\",\n \"verifycert\": false\n }\n }\n}\n'''\nLOG = utils.get_logger(\n module_name='dellemc_powermax_srdf',\n log_devel=logging.INFO)\n\nHAS_PYU4V = utils.has_pyu4v_sdk()\n\nPYU4V_VERSION_CHECK = utils.pyu4v_version_check()\n\n# Application Type\nAPPLICATION_TYPE = 'ansible_v1.1'\n\n\nclass PowerMax_SRDF(object):\n\n '''Class with srdf operations'''\n\n def __init__(self):\n ''' Define all parameters required by this module'''\n self.module_params = utils.get_powermax_management_host_parameters()\n 
self.module_params.update(self.get_powermax_srdf_pair_parameters())\n # initialize the ansible module\n self.module = AnsibleModule(\n argument_spec=self.module_params,\n supports_check_mode=False\n )\n # result is a dictionary that contains changed status, srdf_link\n # and job details\n self.result = {\n \"changed\": False,\n \"SRDF_link_details\": {},\n \"Job_details\": {}}\n if HAS_PYU4V is False:\n self.module.fail_json(msg=\"Ansible modules for PowerMax require \"\n \"the PyU4V python library to be \"\n \"installed. Please install the library \"\n \"before using these modules.\")\n\n if PYU4V_VERSION_CHECK is not None:\n # log first: fail_json raises and exits, so anything after it never runs\n LOG.error(PYU4V_VERSION_CHECK)\n self.module.fail_json(msg=PYU4V_VERSION_CHECK)\n\n universion_details = utils.universion_check(\n self.module.params['universion'])\n LOG.info(\"universion_details: {0}\".format(universion_details))\n\n if not universion_details['is_valid_universion']:\n self.module.fail_json(msg=universion_details['user_message'])\n\n self.u4v_conn = utils.get_U4V_connection(\n self.module.params, application_type=APPLICATION_TYPE)\n self.replication = self.u4v_conn.replication\n LOG.info('Got PyU4V instance for replication on PowerMax ')\n self.idempotency_dict = {\n 'Synchronized': ['Establish', 'Resume'],\n 'Consistent': ['Establish', 'Resume'],\n 'Suspended': ['Suspend', 'Failover'],\n 'Failed Over': ['Suspend', 'Failover'],\n 'SyncInProg': ['Establish', 'Resume'],\n }\n\n self.idempotency_dict_metro = {\n 'Suspended': ['Suspend'],\n 'SyncInProg': ['Establish'],\n 'ActiveActive': ['Establish'],\n 'ActiveBias': ['Establish']\n }\n\n def get_powermax_srdf_pair_parameters(self):\n return dict(\n sg_name=dict(required=False, type='str'),\n remote_serial_no=dict(required=False, type='str'),\n state=dict(required=True, type='str', choices=['present',\n 'absent']),\n srdf_state=dict(required=False, type='str', choices=['Establish',\n 'Resume',\n 'Restore',\n 'Suspend',\n 'Swap',\n 'Split',\n 'Failback',\n 'Failover',\n 'Setbias']),\n srdf_mode=dict(required=False, type='str', choices=['Active',\n 'Adaptive Copy',\n 'Synchronous',\n 'Asynchronous']),\n rdfg_no=dict(type='int', required=False, default=None),\n wait_for_completion=dict(type='bool', required=False, default=False),\n new_rdf_group=dict(type='bool', required=False, default=False),\n witness=dict(type='bool', required=False, default=None),\n job_id=dict(type='str', required=False, default=None))\n\n def get_srdf_link(self, sg_name):\n '''\n Get details of a given srdf_link\n '''\n rdfg_number = self.module.params['rdfg_no']\n if not rdfg_number:\n rdfg_list = self.replication.get_storagegroup_srdfg_list(sg_name)\n if len(rdfg_list) == 0:\n error_msg = 'No RDF group exists for the given storage group'\n LOG.info(error_msg)\n return None\n elif len(rdfg_list) > 1:\n error_msg = (\"Multiple RDF groups exist for the given storage\"\n \" group. Please specify the RDF group number\")\n LOG.error(error_msg)\n self.module.fail_json(msg=error_msg)\n else:\n rdfg_number = rdfg_list[0]\n\n try:\n # check for Concurrent/star configuration,\n if self.module.params['remote_serial_no']:\n remote_serial_no = self.module.params['remote_serial_no']\n try:\n rdfg_details = self.replication.get_rdf_group(rdfg_number)\n if rdfg_details['remoteSymmetrix'] != remote_serial_no:\n error_msg = (\n \"Remote array for the RDF group number {0} does\"\n \" not match with the given Remote array {1}. Please\"\n \" specify RDF group you want to use. 
Also note, Ansible\"\n \" modules v1.1 do not support Concurrent SRDF\"\n \" configurations.\".format(\n rdfg_number, remote_serial_no))\n LOG.error(error_msg)\n self.module.fail_json(msg=error_msg)\n except Exception as e:\n error_msg = (\n \"Got error {0} while getting RDF group details for \"\n \"rdfg number {1}\" .format(str(e), rdfg_number))\n LOG.error(error_msg)\n self.module.fail_json(msg=error_msg)\n\n LOG.info(\n \"Getting srdf details for storage group {0} with rdfg number\"\n \"{1}\".format(\n sg_name, rdfg_number))\n srdf_linkFromGet = self.replication.get_storagegroup_srdf_details(\n storagegroup_id=sg_name, rdfg_num=rdfg_number)\n if srdf_linkFromGet:\n LOG.info('SRDF link details fetched are: {0}'.format(\n srdf_linkFromGet))\n return srdf_linkFromGet\n except Exception as e:\n LOG.error(\n \"Got error {0} while getting SRDF details for storage group \"\n \"{1} with rdfg number {2}\" .format(\n str(e), sg_name, rdfg_number))\n return None\n\n def create_srdf_link(self):\n '''\n Create srdf_link for given storagegroup_id group and remote array\n '''\n sg_name = self.module.params['sg_name']\n remote_serial_no = self.module.params['remote_serial_no']\n srdf_mode = self.module.params['srdf_mode']\n if (remote_serial_no is None or srdf_mode is None):\n error_msg = (\n \"Mandatory parameters not found. Required parameters \"\n \"for creating an SRDF link are remote array serial number \"\n \"and SRDF mode\")\n LOG.error(error_msg)\n self.module.fail_json(msg=error_msg)\n try:\n establish_flag = self._compute_required_establish_flag(\n self.module.params['srdf_state'])\n rdfg_number = self.module.params['rdfg_no']\n forceNewRdfGroup = self.module.params['new_rdf_group']\n async_flag = not(self.module.params['wait_for_completion'])\n witness = self.module.params['witness']\n \n if witness is False:\n errorMsg = (\"Create SRDF link operation failed as Ansible\"\n \" modules v1.1 does not allow creation of SRDF\"\n \" links using Bias for resiliency.\")\n LOG.error(errorMsg)\n self.module.fail_json(msg=errorMsg)\n\n msg = (\n \"Creating srdf_link with parameters:sg_name={0}, \"\n \"remote_serial_no={1}, srdfmode={2}, establish_flag={3}, \"\n \"rdfgroup_no={4}, new_rdf_group={5}, async_flag={6}\")\n LOG.info(\n msg.format(\n sg_name,\n remote_serial_no,\n srdf_mode,\n establish_flag,\n rdfg_number,\n forceNewRdfGroup,\n async_flag))\n resp = self.replication.create_storagegroup_srdf_pairings(\n storagegroup_id=sg_name,\n remote_sid=remote_serial_no,\n srdfmode=srdf_mode,\n establish=establish_flag,\n forceNewRdfGroup=forceNewRdfGroup,\n rdfg_number=rdfg_number,\n _async=async_flag)\n LOG.info('Response from create SRDF link call {0}'.format(resp))\n if async_flag:\n self.result['Job_details'] = resp\n self.result['SRDF_link_details'] = None\n else:\n self.result['SRDF_link_details'] = resp\n self.result['Job_details'] = None\n return True\n\n except Exception as e:\n errorMsg = 'Create srdf_link for sg {0} failed with error {1}'.format(\n sg_name, str(e))\n LOG.error(errorMsg)\n self.module.fail_json(msg=errorMsg)\n\n def _compute_required_establish_flag(self, srdf_state):\n if (srdf_state is None or srdf_state == 'Suspend'):\n return False\n elif srdf_state == 'Establish':\n return True\n else:\n errorMsg = (\n \"Creation of SRDF link failed. Allowed states while \"\n \"creating SRDF link are only Establish or Suspend. 
Got {0}\".format(srdf_state))\n LOG.error(errorMsg)\n self.module.fail_json(msg=errorMsg)\n\n def modify_srdf_mode(self, srdf_mode):\n async_flag = not(self.module.params['wait_for_completion'])\n srdf_link = self.result['SRDF_link_details']\n if srdf_mode == 'Adaptive Copy':\n srdf_mode = 'AdaptiveCopyDisk'\n try:\n resp = self.replication.modify_storagegroup_srdf(\n storagegroup_id=srdf_link['storageGroupName'],\n rdfg=srdf_link['rdfGroupNumber'],\n action='SetMode',\n options={\n 'setMode': {\n 'mode': srdf_mode}},\n _async=async_flag)\n if async_flag:\n self.result['Job_details'] = resp\n self.result['SRDF_link_details'] = None\n else:\n self.result['SRDF_link_details'] = resp\n self.result['Job_details'] = None\n return True\n except Exception as e:\n errorMsg = (\"Modifying SRDF mode of srdf_link from {0} to {1} for \"\n \"SG {2} failed with error {3}\".format(\n srdf_link['modes'][0], srdf_mode,\n srdf_link['storageGroupName'], str(e)))\n LOG.error(errorMsg)\n self.module.fail_json(msg=errorMsg)\n\n def modify_srdf_state(self, action):\n modify_body = {}\n\n async_flag = not(self.module.params['wait_for_completion'])\n srdf_link = self.result['SRDF_link_details']\n\n modify_body['storagegroup_id'] = srdf_link['storageGroupName']\n modify_body['rdfg'] = srdf_link['rdfGroupNumber']\n modify_body['action'] = action\n modify_body['_async'] = async_flag\n\n if self.module.params['witness'] is not None:\n if srdf_link['modes'][0] != 'Active':\n errorMsg = (\"witness flag can not be used for non-Metro \"\n \"configurations.\")\n LOG.error(errorMsg)\n self.module.fail_json(msg=errorMsg)\n elif action not in ['Establish', 'Restore', 'Suspend']:\n errorMsg = (\"witness flag can be used only for 3 actions:\"\n \" Establish, Restore and Suspend\")\n LOG.error(errorMsg)\n self.module.fail_json(msg=errorMsg)\n else:\n modify_body['options'] = {\n action.lower(): {\n 'metroBias': not(self.module.params['witness'])}}\n\n try:\n LOG.info('The modify_body is {0}:'.format(modify_body))\n resp = self.replication.modify_storagegroup_srdf(**modify_body)\n\n if async_flag:\n self.result['Job_details'] = resp\n self.result['SRDF_link_details'] = None\n else:\n self.result['SRDF_link_details'] = resp\n self.result['Job_details'] = None\n return True\n except Exception as e:\n errorMsg = (\"Modifying SRDF state of srdf_link for storage group \"\n \"{0} failed with error {1}\".format(\n srdf_link['storageGroupName'], str(e)))\n LOG.error(errorMsg)\n self.module.fail_json(msg=errorMsg)\n\n def _check_for_SRDF_state_modification(self, new_operation):\n srdf_link = self.result['SRDF_link_details']\n current_state = srdf_link['states'][0]\n changed = False\n \n if (srdf_link['modes'][0] == 'Active' and \n current_state in self.idempotency_dict_metro and\n new_operation in self.idempotency_dict_metro[current_state]\n ):\n LOG.info('Modification of SRDF state not required')\n changed = False\n \n elif (srdf_link['modes'][0] != 'Active' and \n current_state in self.idempotency_dict and \n new_operation in self.idempotency_dict[current_state]):\n LOG.info('Modification of SRDF state not required')\n changed = False\n else:\n LOG.info('Modifying SRDF state from {0} to {1}'.format(\n current_state, new_operation))\n\n changed = self.modify_srdf_state(new_operation)\n\n return changed\n\n def delete_srdf_link(self):\n '''\n Delete srdf_link from system\n '''\n srdf_link = self.result['SRDF_link_details']\n try:\n self.replication.delete_storagegroup_srdf(\n srdf_link['storageGroupName'], int(\n 
srdf_link['rdfGroupNumber']))\n self.result['SRDF_link_details'] = {}\n return True\n except Exception as e:\n errorMsg = ('Delete srdf_link {0} failed with error {1}'.format(\n srdf_link['storageGroupName'], str(e)))\n LOG.error(errorMsg)\n self.module.fail_json(msg=errorMsg)\n\n def get_job_details(self, job_id):\n try:\n self.result['Job_details'] = self.u4v_conn.common.get_job_by_id(\n job_id)\n except Exception as e:\n errorMsg = (\n 'Get Job details for job_id {0} failed with error {1}'.format(\n job_id, str(e)))\n LOG.error(errorMsg)\n self.module.fail_json(msg=errorMsg)\n\n def perform_module_operation(self):\n '''\n Perform different actions on srdf_link based on user parameter\n chosen in playbook\n '''\n state = self.module.params['state']\n sg_name = self.module.params['sg_name']\n srdf_mode = self.module.params['srdf_mode']\n srdf_state = self.module.params['srdf_state']\n job_id = self.module.params['job_id']\n changed = False\n\n if (job_id and sg_name) or (not job_id and not sg_name):\n errorMsg = 'Please specify either job ID or SG name in one Ansible task'\n LOG.error(errorMsg)\n self.module.fail_json(msg=errorMsg)\n\n if job_id:\n if state == 'present':\n LOG.info('Getting details of the Job {0}'.format(job_id))\n self.get_job_details(job_id)\n else:\n errorMsg = 'Set state=present for getting Job status'\n LOG.error(errorMsg)\n self.module.fail_json(msg=errorMsg)\n else:\n srdf_link = self.get_srdf_link(sg_name)\n self.result['SRDF_link_details'] = srdf_link\n if state == 'present' and not self.result['SRDF_link_details']:\n changed = self.create_srdf_link()\n\n elif state == 'present' and self.result['SRDF_link_details']:\n if (srdf_mode !=\n self.result['SRDF_link_details']['modes'][0] and srdf_mode):\n LOG.info('Modifying SRDF mode from {0} to {1}'.format(\n self.result['SRDF_link_details']['modes'][0], srdf_mode))\n changed = self.modify_srdf_mode(srdf_mode) or changed\n\n if srdf_state is not None:\n changed = self._check_for_SRDF_state_modification(\n srdf_state) or changed\n\n elif state == 'absent' and self.result['SRDF_link_details']:\n LOG.info('Deleting srdf_link with SG {0}'.format(sg_name))\n changed = self.delete_srdf_link() or changed\n\n # Update the module's final state\n LOG.info('changed {0}'.format(changed))\n self.result['changed'] = changed\n self.module.exit_json(**self.result)\n\n\ndef main():\n ''' Create PowerMax_srdf object and perform action on it\n based on user input from playbook'''\n obj = PowerMax_SRDF()\n obj.perform_module_operation()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dellemc_ansible/powermax/library/dellemc_powermax_srdf.py","file_name":"dellemc_powermax_srdf.py","file_ext":"py","file_size_in_byte":28945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"648952277","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 19 14:39:41 2018\n\n@author: chris_poskitt\n\"\"\"\n\nfrom libdw import sm\n\nclass TorchLight(sm.SM):\n start_state = 0\n \n def get_next_values(self, state, inp):\n assert(inp == 'push')\n assert(0 <= state <= 1)\n \n if state == 0:\n next_state = 1\n output = 'on'\n elif state == 1:\n next_state = 0\n output = 'off'\n \n return next_state, output\n\n# do this in the kernel\n\ns = TorchLight()\ns.start()\ns.step('push')\ns.step('push')\ns.step('push')\n\ns = TorchLight()\ns.start()\nprint(s.transduce(['push'] * 
7))","sub_path":"torchlight.py","file_name":"torchlight.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"652216832","text":"# -*- coding: utf-8 -*-\nfrom openerp.osv import osv, fields\nfrom openerp.tools.misc import attrgetter\n\nclass ir_config_parameter(osv.osv):\n _inherit = 'ir.config_parameter'\n\n def onchange_od_model_id(self, cr, uid,ids, od_model_id,context=None):\n res = {}\n if od_model_id:\n data = od_model_id.split(',')\n value=self.pool.get(str(data[0])).browse(cr,uid,int(data[1]),context).name\n res = {'value':{'value':value}}\n return res\n\n def _models_field_get(self, cr, uid, field_key, field_value, context=None):\n get = attrgetter(field_key, field_value)\n obj = self.pool.get('ir.model.fields')\n ids = obj.search(cr, uid, [], context=context)\n res = set()\n for o in obj.browse(cr, uid, ids, context=context):\n res.add(get(o))\n return list(res)\n\n def _models_get(self, cr, uid, context=None):\n return self._models_field_get(cr, uid, 'model', 'model_id.name', context)\n\n _columns = {\n 'od_model_id' : fields.reference('Ref Value/Table',selection=_models_get,size=128,help=\"Select the Resource/Model/Table from which the data will be fetched\"),\n }\n","sub_path":"orchid_parameter_setting/ir_config_parameter.py","file_name":"ir_config_parameter.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"617534541","text":"'''\r\n\r\nWe define the adjacent pixels of a pixel p of an image as the pixels that touch p horizontally or vertically.\r\nIf a pixel lies on the border of the image, its neighborhood does not include pixels outside the image.\r\nThe pixel with coordinates (x,y) therefore has as adjacent pixels those with\r\ncoordinates (x-1,y),(x+1,y),(x,y-1),(x,y+1) that belong to the image.\r\n\r\nWe define two pixels as connected if one can be reached from the other by moving only\r\nacross adjacent pixels of the same color (obviously, for this to be possible the\r\ntwo pixels must have the same color).\r\n\r\nTo load and save PNG images, use the load and save functions provided in the immagini.py module.\r\n\r\nWrite a function ricolora(fname, lista, fnameout) that, given:\r\n- the path of a file containing a PNG image\r\n- a list of quadruples (x,y,c1,c2) where x and y are the coordinates of a pixel of the image and c1 and c2 are two RGB color triples\r\n- the path of a file (fnameout) to create\r\nreads the image in fname, performs a recoloring operation on some of its pixels and\r\nsaves the recolored image to the file fnameout.\r\n\r\nThe recoloring operation is as follows. 
For each quadruple (x,y,c1,c2) in the list (in order),\r\n- all pixels connected to the pixel with coordinates (x,y) in the image are recolored with color c1,\r\n- all pixels on the perimeter (on the 'border') of the area just colored must be recolored with color c2.\r\nThe perimeter of the colored area is the set of pixels that do not have all 4 neighbors inside the recolored area\r\n(that is, at least one neighbor has a color different from the one being recolored, or at least one does not exist because it would fall outside the image).\r\n\r\nFor example, for the image 'I1.png', the call ricolora('I1.png',[(10,10,(255,0,0), (0,0,255))],'OUT1.png')\r\nproduces the image 'OUT1.png', identical to the original except that all pixels connected\r\nto the pixel with coordinates (10,10) (green in color) are recolored red ((255,0,0)),\r\nwhile the pixels on the border of the initially green area are recolored blue.\r\n\r\nFor each recolored area, the inner area and perimeter must also be computed, defined as follows:\r\n- the inner area is the number of pixels recolored with color c1\r\n- the perimeter is the number of pixels recolored with color c2\r\n\r\nThe function must return the list of pairs (inner area, perimeter) in the same order in which the areas were colored.\r\n\r\nFor more examples see the file grade03.txt\r\n'''\r\n\r\nfrom immagini import *\r\nimport numpy as np\r\nfrom scipy.ndimage.measurements import label\r\nimport copy\r\nrosso = (255, 0, 0)\r\nblu = ( 0, 0, 255)\r\nverde = ( 0, 255, 0)\r\nnero = ( 0, 0, 0)\r\nbianco= (255, 255, 255)\r\ngiallo= (255, 255, 0)\r\ncyan = ( 0, 255, 255)\r\nmagenta= (255, 0, 255)\r\ndef ricolora(fname,lista,output):\r\n listone = []\r\n immagine=load(fname)\r\n altezza=len(immagine)\r\n larghezza=len(immagine[0])\r\n matrice_temporanea=[]\r\n for x,y,c,c2 in lista:\r\n px=immagine[y][x]\r\n matrice_temporanea,pixd=connessioni(c,immagine,px,altezza,larghezza,y,x)\r\n immagine,a,p=calcola(matrice_temporanea,immagine,pixd,c,c2)\r\n listone.append((a,p))\r\n save(immagine,output)\r\n return listone\r\n\r\n\r\ndef connessioni(c,immagine,px,altezza,larghezza,y,x):\r\n # build a 0/1 mask of the pixels sharing the start pixel's color, label its\r\n # 4-connected components with scipy, and return the component id at (y,x)\r\n matrice_temporanea=copy.deepcopy(immagine)\r\n for i in range(altezza):\r\n for j in range(larghezza):\r\n if immagine[i][j]==px:\r\n matrice_temporanea[i][j]=1\r\n else:\r\n matrice_temporanea[i][j]=0\r\n matrice_temporanea=np.array(matrice_temporanea)\r\n labeled_array, num_features = label(matrice_temporanea)\r\n matrice_temporanea=(labeled_array.tolist())\r\n pixd=matrice_temporanea[y][x]\r\n return matrice_temporanea,pixd\r\n\r\ndef calcola(img,immagine,pixd,c,c2):\r\n # recolor the selected component: border pixels get c2, interior pixels get c\r\n bordo = 0\r\n interno = 0\r\n for i in range(len(img)):\r\n for j in range(len(img[0])):\r\n if img[i][j] == pixd:\r\n if limiti(i,j,c2,img) == True :\r\n immagine[i][j] = c2\r\n bordo = bordo + 1\r\n elif contorno(i,j,c2,img,pixd) == True:\r\n immagine[i][j] = c2\r\n bordo = bordo + 1\r\n else :\r\n immagine[i][j] = c\r\n interno = interno + 1\r\n return immagine , interno , bordo\r\ndef contorno(i,j,c2,img,pixd):\r\n # True if any 4-neighbor lies outside the labeled component\r\n a = img[i][j+1]\r\n b = img[i][j-1]\r\n c = img[i+1][j]\r\n d = img[i-1][j]\r\n if a != pixd or b != pixd or c != pixd or d != pixd :\r\n return True\r\n else :\r\n return False\r\ndef limiti(i,j,c2,img):\r\n # True if any 4-neighbor would fall outside the image\r\n a = j+1\r\n b = j-1\r\n c = i+1\r\n d = i-1\r\n if d < 0 or b < 0 or a >= len(img[0]) or c >= len(img) :\r\n return True\r\n else:\r\n return False\r\n
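\r\n\r\n# Minimal usage sketch, guarded so the module can be imported without side effects.\r\n# Hedged: assumes a test image 'I1.png' (the docstring example) sits next to this script.\r\nif __name__ == '__main__':\r\n risultato = ricolora('I1.png', [(10, 10, rosso, blu)], 'OUT1.png')\r\n print(risultato)  # -> [(inner area, perimeter)] for each recolored region\r\n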
\r\n","sub_path":"students/1799754/homework03/program03.py","file_name":"program03.py","file_ext":"py","file_size_in_byte":5044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"584026590","text":"from odoo import _, api, fields, models, tools\n\nclass AccountInvoice(models.Model):\n _name = 'account.move'\n _inherit = ['account.move']\n\n mx_integritas_prioridad=fields.Char(string=\"Blacklist Priority\",translate=True,compute='_set_datos_atomaticos')\n\n def _set_datos_atomaticos(self):\n for record in self:\n rfc=record.partner_id.vat or record.partner_id.parent_id.vat\n rfc_ln=self.env['mx_integritas_validacion_rfc.rfc'].search([('name','=',rfc),('is_condonado','=',False)])\n rfc_lnB=self.env['mx_integritas_validacion_rfc.rfc'].search([('name','=',rfc),('is_condonado','=',True)])\n if rfc_ln:\n record.mx_integritas_prioridad='A'\n if rfc_lnB and not rfc_ln:\n record.mx_integritas_prioridad='B'\n if not rfc_ln and not rfc_lnB:\n record.mx_integritas_prioridad=''\n\n","sub_path":"mx_integritas_validacion_rfc/models/Factura.py","file_name":"Factura.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"394359752","text":"import requests\r\nimport sys\r\nimport json\r\nimport os\r\nimport tabulate\r\nimport click\r\nimport pprint\r\nimport time\r\nimport yaml\r\nimport config\r\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\r\n\r\nvmanage_host = config.vmanage_host\r\nvmanage_port = config.vmanage_port\r\nvmanage_username = config.vmanage_username\r\nvmanage_password = config.vmanage_password\r\n\r\n\r\n#YAML_FILE = config.YAML_FILE\r\n\r\n\r\n\r\nrequests.packages.urllib3.disable_warnings()\r\n\r\nclass rest_api_lib:\r\n\r\n def login(self,vmanage_host,vmanage_port, username, password):\r\n\r\n '''Login to vmanage'''\r\n base_url = f'https://{vmanage_host}:{vmanage_port}/'\r\n\r\n login_action = 'j_security_check'\r\n\r\n #Format data for loginForm\r\n login_data = {'j_username' : username, 'j_password' : password}\r\n\r\n\r\n #Url for posting login data\r\n login_url = base_url + login_action\r\n\r\n sess = requests.session()\r\n\r\n #If the vmanage has a certificate signed by a trusted authority change verify to True\r\n\r\n login_response = sess.post(url=login_url, data=login_data, verify=False)\r\n\r\n try:\r\n cookies = login_response.headers['Set-Cookie']\r\n jsessionid = cookies.split(';')\r\n return(jsessionid[0])\r\n except:\r\n print('No valid JSESSION ID returned\\n')\r\n exit()\r\n\r\n def get_token(self, vmanage_host, vmanage_port, jsessionid):\r\n headers = {'Cookie': jsessionid}\r\n base_url = f'https://{vmanage_host}:{vmanage_port}'\r\n api = '/dataservice/client/token'\r\n url = base_url + api\r\n response = requests.get(url=url, headers=headers, verify=False)\r\n if response.status_code == 200:\r\n return(response.text)\r\n else:\r\n return None\r\n\r\n def get_request(self,mount_point):\r\n '''GET request'''\r\n url = f'https://{vmanage_host}:{vmanage_port}/dataservice/{mount_point}'\r\n #print(url)\r\n\r\n response = requests.get(url, headers=headers, verify=False)\r\n\r\n return response\r\n\r\n def post_request(self, mount_point, payload):\r\n '''POST request'''\r\n url = f'https://{vmanage_host}:{vmanage_port}/dataservice/{mount_point}'\r\n #print(url)\r\n payload = json.dumps(payload)\r\n #print (payload)\r\n #print (headers)\r\n\r\n 
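# vManage expects the JSESSIONID cookie on every call and, on newer releases, the X-XSRF-TOKEN header for POSTs; both are carried in the module-level 'headers' built after login\r\n 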
response = requests.post(url=url, data=payload, headers=headers, verify=False)\r\n #print(response.text)\r\n #data = response\r\n return response\r\n\r\n\r\n#Create session with vmanage \r\nprint()\r\nprint('*'*100)\r\nvmanage_session = rest_api_lib()\r\njsessionid = vmanage_session.login(vmanage_host,vmanage_port,vmanage_username,vmanage_password)\r\nprint('The session id is '+jsessionid)\r\ntoken = vmanage_session.get_token(vmanage_host,vmanage_port,jsessionid)\r\nprint('The token is '+token)\r\nprint('*'*100)\r\nprint()\r\n\r\nif token is not None:\r\n headers = {'Content-Type': 'application/json','Cookie': jsessionid, 'X-XSRF-TOKEN': token}\r\nelse:\r\n headers = {'Content-Type': 'application/json','Cookie': jsessionid}\r\n\r\ndef list_devices():\r\n\r\n # Retrieve and return information about network devices in SD-WAN fabric.\r\n\r\n print('Retrieving the device list')\r\n\r\n response = vmanage_session.get_request('device').json()\r\n\r\n items = response['data']\r\n\r\n print('\\nDevice details retrieved for one network device') \r\n \r\n #pprint.pprint(items[1])\r\n\r\n print('\\nlist of all devices retrieved')\r\n\r\n headers = ['Host-Name', 'Device Type', 'Latitude', 'Longitude', 'Certificate\\nValidity', 'Version', 'Device Model', 'System IP']\r\n table = list()\r\n\r\n for item in items:\r\n if item['reachability'] == 'reachable':\r\n tr = [item['host-name'], item['device-type'], item['latitude'], item['longitude'], item['certificate-validity'], item['version'], item['device-model'], item['system-ip']]\r\n table.append(tr)\r\n try:\r\n print(tabulate.tabulate(table, headers, tablefmt='fancy_grid'))\r\n except UnicodeEncodeError:\r\n print(tabulate.tabulate(table, headers, tablefmt='grid'))\r\n\r\ndef system_status(system_ip):\r\n # Retrieve and return information about system status of network device in SD-WAN fabric\r\n\r\n print('Retrieving the System Status')\r\n\r\n url = 'device/system/status?deviceId={0}'.format(system_ip)\r\n\r\n response = vmanage_session.get_request(url).json()\r\n\r\n items = response['data']\r\n\r\n print('\\nSystem status for Device = ',system_ip)\r\n\r\n headers = ['Host name', 'Up time', 'Version', 'Memory Used', 'CPU system']\r\n table = list()\r\n\r\n for item in items:\r\n tr = [item['vdevice-host-name'], item['uptime'], item['version'], item['mem_used'], item['cpu_system']]\r\n table.append(tr)\r\n\r\n try:\r\n print(tabulate.tabulate(table, headers, tablefmt='fancy_grid'))\r\n except UnicodeEncodeError:\r\n print(tabulate.tabulate(table, headers, tablefmt='grid'))\r\n\r\n\r\ndef interface_status(system_ip):\r\n # Retrieve and return information about Interface status of network device in SD-WAN fabric\r\n\r\n print('Retrieving the interface Status')\r\n\r\n url = 'device/interface/synced?deviceId={0}'.format(system_ip)\r\n\r\n response = vmanage_session.get_request(url).json()\r\n\r\n items = response['data']\r\n\r\n print('\\nInterfaces status for Device = ',system_ip)\r\n\r\n headers = ['Interface Name', 'Operational status']\r\n table = list()\r\n\r\n for item in items:\r\n tr = [item['ifname'], item['if-oper-status']]\r\n table.append(tr)\r\n\r\n try:\r\n print(tabulate.tabulate(table, headers, tablefmt='fancy_grid'))\r\n except UnicodeEncodeError:\r\n print(tabulate.tabulate(table, headers, tablefmt='grid'))\r\n\r\n\r\n\r\ndef control_status(system_ip):\r\n # Retrieve and return information about Control status of network device in SD-WAN fabric\r\n\r\n\r\n print('Retrieving the Control Status')\r\n\r\n url = 
'device/control/synced/connections?deviceId={0}'.format(system_ip)\r\n\r\n response = vmanage_session.get_request(url).json()\r\n\r\n items = response['data']\r\n\r\n print('\\nControl Connection status for Device = ',system_ip)\r\n\r\n headers = ['Peer Type', 'Peer System IP', 'state', 'Last Updated']\r\n table = list()\r\n\r\n for item in items:\r\n tr = [item['peer-type'], item['system-ip'], item['state'], time.strftime('%m/%d/%Y %H:%M:%S', time.gmtime(item['lastupdated']/1000.))]\r\n table.append(tr)\r\n\r\n try:\r\n print(tabulate.tabulate(table, headers, tablefmt='fancy_grid'))\r\n except UnicodeEncodeError:\r\n print(tabulate.tabulate(table, headers, tablefmt='grid'))\r\n\r\n\r\n\r\ndef device_counters(system_ip):\r\n # Retrieve and return information about Device Counters of network device in SD-WAN fabric\r\n\r\n print('Retrieving the Device Counters')\r\n\r\n url = 'device/counters?deviceId={0}'.format(system_ip)\r\n\r\n response = vmanage_session.get_request(url).json()\r\n\r\n items = response['data']\r\n\r\n print('\\nDevice Counters for Device = ',system_ip)\r\n\r\n\r\n headers = ['OMP Peers Up', 'OMP Peers Down', 'Vsmart connections', 'BFD Sessions Up', 'BFD Sessions Down']\r\n table = list()\r\n\r\n for item in items:\r\n try:\r\n tr = [item['ompPeersUp'], item['ompPeersDown'], item['number-vsmart-control-connections'], item['bfdSessionsUp'], item['bfdSessionsDown']]\r\n table.append(tr)\r\n except KeyError:\r\n pass\r\n\r\n try:\r\n print(tabulate.tabulate(table, headers, tablefmt='fancy_grid'))\r\n except UnicodeEncodeError:\r\n print(tabulate.tabulate(table, headers, tablefmt='grid'))\r\n\r\n\r\n\r\ndef list_device_template():\r\n # Retrieve and return device templates list.\r\n\r\n print('Retrieving the templates available.')\r\n\r\n response = vmanage_session.get_request('template/device').json()\r\n\r\n items = response['data']\r\n\r\n headers = ['Template Name', 'Device Type', 'Template ID', 'Attached devices', 'Template version']\r\n table = list()\r\n\r\n for item in items:\r\n tr = [item['templateName'], item['deviceType'], item['templateId'], item['devicesAttached'], item['templateAttached']]\r\n table.append(tr)\r\n try:\r\n print(tabulate.tabulate(table, headers, tablefmt='fancy_grid'))\r\n except UnicodeEncodeError:\r\n print(tabulate.tabulate(table, headers, tablefmt='grid'))\r\n\r\n\r\n\r\n\r\ndef create_banner_template():\r\n #create banner template with Cisco SDWAN.\r\n\r\n #print('Loading Feature Template Details from YAML File')\r\n with open('banner_config.yaml') as f:\r\n #print(f)\r\n config = yaml.safe_load(f.read())\r\n\r\n payload = {\r\n 'templateName': config['template_name'],\r\n 'templateMinVersion': '15.0.0',\r\n 'templateDescription': config['template_description'],\r\n 'templateType': 'cisco_banner',\r\n 'templateDefinition': {\r\n 'login': {\r\n 'vipObjectType': 'object',\r\n 'vipType': 'constant',\r\n 'vipValue': config['login_banner'], # using the values defined for login banner in yaml file\r\n 'vipVariableName': 'banner_login'\r\n },\r\n 'motd': {\r\n 'vipObjectType': 'object',\r\n 'vipType': 'constant',\r\n 'vipValue': config['motd_banner'], # using the values defined for motd banner in yaml file\r\n 'vipVariableName': 'banner_motd'\r\n }\r\n },\r\n 'deviceType': [\r\n config['device_type']\r\n ],\r\n 'deviceModels': [\r\n {\r\n 'name': 'vedge-CSR-1000v',\r\n 'displayName': 'vEdge CSR 1000v',\r\n 'deviceType': 'vedge-CSR-1000v',\r\n 'isCliSupported': True,\r\n 'isCiscoDeviceModel': False\r\n }\r\n ],\r\n 'factoryDefault': False\r\n 
}\r\n\r\n #pprint.pprint(payload)\r\n response = vmanage_session.post_request('template/feature/', payload)\r\n req=response.json()\r\n\r\n if response.status_code == 200:\r\n d=dict(response.json())\r\n print('Created banner feature template with the ID: '+d['templateId'])\r\n return d['templateId']\r\n\r\n else:\r\n print('Failed creating banner template, error: ',response.text)\r\n exit\r\n\r\ndef create_SIG_cred_template():\r\n # create SIG Credential template with Cisco SDWAN.\r\n \r\n\r\n #print('Loading Feature Template Details from YAML File')\r\n with open('SIG_cred_config.yaml') as f:\r\n #print(f)\r\n config = yaml.safe_load(f.read())\r\n\r\n payload = {\r\n\t'templateName':config['template_name'],\r\n\t\t'templateDescription':config['template_description'],\r\n\t\t'templateType':'cisco_sig_credentials',\r\n\t\t'deviceType':[\r\n\t\t\tconfig['device_type']\r\n\t\t],\r\n\t\t'templateMinVersion':'15.0.0',\r\n\t\t'templateDefinition':{\r\n\t\t\t'umbrella':{\r\n\t\t\t\t'api-key':{\r\n\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t'vipValue':'a27322d2918a4aba9f5afa811110ff41',\r\n\t\t\t\t\t'vipVariableName':'system_api_key'\r\n\t\t\t\t},\r\n\t\t\t\t'api-secret':{\r\n\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t'vipValue':'2176b00b1e074d93be82f3d5fe287353',\r\n\t\t\t\t\t'vipVariableName':'system_api_secret',\r\n\t\t\t\t\t'vipNeedsEncryption':'true'\r\n\t\t\t\t},\r\n\t\t\t\t'org-id':{\r\n\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t'vipValue':'5326453',\r\n\t\t\t\t\t'vipVariableName':'system_org_id'\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t },\r\n 'factoryDefault':'false'\r\n\t}\r\n\r\n\r\n #pprint.pprint(payload)\r\n response = vmanage_session.post_request('template/feature/', payload)\r\n req=response.json()\r\n #print(req)\r\n\r\n if response.status_code == 200:\r\n d=dict(response.json())\r\n print('Created SIG Credentials feature template with the ID: '+d['templateId'])\r\n return d['templateId']\r\n\r\n else:\r\n print('Failed creating SIG Credentials template, error: ',response.text)\r\n exit\r\n\r\ndef create_SIG_tunnel_template():\r\n # create SIG Tunnel template with Cisco SDWAN\r\n\r\n\r\n #print('Loading Feature Template Details from YAML File')\r\n with open('tunnel_config.yaml') as f:\r\n config = yaml.safe_load(f.read())\r\n\r\n payload = 
{\r\n\t\t'templateName':config['template_name'],\r\n\t\t'templateDescription':config['template_description'],\r\n\t\t'templateType':'cisco_secure_internet_gateway',\r\n\t\t'deviceType':[\r\n\t\t\tconfig['device_type']\r\n\t\t],\r\n\t\t'templateMinVersion':'15.0.0',\r\n\t\t'templateDefinition':{\r\n\t\t\t'vpn-id':{\r\n\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t'vipType':'constant',\r\n\t\t\t\t'vipValue':0\r\n\t\t\t},\r\n\t\t\t'interface':{\r\n\t\t\t\t'vipType':'constant',\r\n\t\t\t\t'vipValue':[\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\t'if-name':{\r\n\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t'vipValue':'ipsec1',\r\n\t\t\t\t\t\t\t'vipVariableName':'tunnel_if_name'\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'auto':{\r\n\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t'vipValue':'true'\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'shutdown':{\r\n\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t'vipType':'notIgnore',\r\n\t\t\t\t\t\t\t'vipValue':'false',\r\n\t\t\t\t\t\t\t'vipVariableName':'tunnel_shutdown'\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'description':{\r\n\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t'vipValue':'SIG Tunnel',\r\n\t\t\t\t\t\t\t'vipVariableName':'tunnel_description'\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'ip':{\r\n\t\t\t\t\t\t\t'unnumbered':{\r\n\t\t\t\t\t\t\t\t'vipObjectType':'node-only',\r\n\t\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t\t'vipValue':'true'\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'tunnel-source-interface':{\r\n\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t'vipValue':config['source_interface'],\r\n\t\t\t\t\t\t\t'vipVariableName':'tunnel_tunnel_source_interface'\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'tunnel-destination':{\r\n\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t'vipValue':'dynamic'\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'application':{\r\n\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t'vipValue':'sig'\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'tunnel-set':{\r\n\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t'vipValue':'secure-internet-gateway-umbrella'\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'tunnel-dc-preference':{\r\n\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t'vipValue':'primary-dc'\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'tcp-mss-adjust':{\r\n\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t'vipType':'ignore',\r\n\t\t\t\t\t\t\t'vipValue':1300,\r\n\t\t\t\t\t\t\t'vipVariableName':'tunnel_tcp_mss_adjust_'\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'mtu':{\r\n\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t'vipType':'notIgnore',\r\n\t\t\t\t\t\t\t'vipValue':1400,\r\n\t\t\t\t\t\t\t'vipVariableName':'tunnel_mtu_'\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'dead-peer-detection':{\r\n\t\t\t\t\t\t\t'dpd-interval':{\r\n\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t\t'vipValue':10,\r\n\t\t\t\t\t\t\t\t'vipVariableName':'tunnel_dpd_interval'\r\n\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t'dpd-retries':{\r\n\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t\t'vipValue':3,\r\n\t\t\t\t\t\t\t\t'vipVariableName':'tunnel_dpd_retries'\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'ike':{\r\n\t\t\t\t\t\t\t'ike-version':{\r\n\t\t
\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t\t'vipValue':2\r\n\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t'authentication-type':{\r\n\t\t\t\t\t\t\t\t'pre-shared-key-dynamic':{\r\n\t\t\t\t\t\t\t\t\t'vipObjectType':'node-only',\r\n\t\t\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t\t\t'vipValue':'true'\r\n\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t'ike-rekey-interval':{\r\n\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t'vipType':'ignore',\r\n\t\t\t\t\t\t\t\t'vipValue':14400,\r\n\t\t\t\t\t\t\t\t'vipVariableName':'tunnel_ike_rekey_interval_'\r\n\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t'ike-ciphersuite':{\r\n\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t'vipType':'ignore',\r\n\t\t\t\t\t\t\t\t'vipValue':'aes256-cbc-sha1',\r\n\t\t\t\t\t\t\t\t'vipVariableName':'tunnel_ike_ciphersuite'\r\n\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t'ike-group':{\r\n\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t'vipType':'notIgnore',\r\n\t\t\t\t\t\t\t\t'vipValue':'14',\r\n\t\t\t\t\t\t\t\t'vipVariableName':'tunnel_ike_group'\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'ipsec':{\r\n\t\t\t\t\t\t\t'ipsec-rekey-interval':{\r\n\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t'vipType':'ignore',\r\n\t\t\t\t\t\t\t\t'vipValue':3600,\r\n\t\t\t\t\t\t\t\t'vipVariableName':'tunnel_ipsec_rekey_interval'\r\n\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t'ipsec-replay-window':{\r\n\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t'vipType':'ignore',\r\n\t\t\t\t\t\t\t\t'vipValue':512,\r\n\t\t\t\t\t\t\t\t'vipVariableName':'tunnel_ipsec_replay_window'\r\n\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t'ipsec-ciphersuite':{\r\n\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t'vipType':'notIgnore',\r\n\t\t\t\t\t\t\t\t'vipValue':'aes256-gcm',\r\n\t\t\t\t\t\t\t\t'vipVariableName':'tunnel_ipsec_ciphersuite'\r\n\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t'perfect-forward-secrecy':{\r\n\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t'vipType':'notIgnore',\r\n\t\t\t\t\t\t\t\t'vipValue':'none',\r\n\t\t\t\t\t\t\t\t'vipVariableName':'tunnel_perfect_forward_secrecy'\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t],\r\n\t\t\t\t'vipObjectType':'tree',\r\n\t\t\t\t'vipPrimaryKey':[\r\n\t\t\t\t\t'if-name'\r\n\t\t\t\t]\r\n\t\t\t},\r\n\t\t\t'service':{\r\n\t\t\t\t'vipType':'constant',\r\n\t\t\t\t'vipValue':[\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\t'svc-type':{\r\n\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t'vipValue':'sig'\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'ha-pairs':{\r\n\t\t\t\t\t\t\t'interface-pair':{\r\n\t\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t\t'vipObjectType':'tree',\r\n\t\t\t\t\t\t\t\t'vipPrimaryKey':[\r\n\t\t\t\t\t\t\t\t\t'active-interface',\r\n\t\t\t\t\t\t\t\t\t'backup-interface'\r\n\t\t\t\t\t\t\t\t],\r\n\t\t\t\t\t\t\t\t'vipValue':[\r\n\t\t\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t\t\t'active-interface':{\r\n\t\t\t\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t\t\t\t\t'vipValue':'ipsec1'\r\n\t\t\t\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t\t\t\t'backup-interface':{\r\n\t\t\t\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t\t\t\t\t'vipValue':'None'\r\n\t\t\t\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t\t\t\t'active-interface-weight':{\r\n\t\t\t\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t\t\t\t\t'vipValue':1\r\n\t\t\t\t\t\t\t\
t\t\t},\r\n\t\t\t\t\t\t\t\t\t\t'backup-interface-weight':{\r\n\t\t\t\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t\t\t\t'vipType':'constant',\r\n\t\t\t\t\t\t\t\t\t\t\t'vipValue':1\r\n\t\t\t\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t\t\t\t'priority-order':[\r\n\t\t\t\t\t\t\t\t\t\t\t'active-interface',\r\n\t\t\t\t\t\t\t\t\t\t\t'backup-interface',\r\n\t\t\t\t\t\t\t\t\t\t\t'active-interface-weight',\r\n\t\t\t\t\t\t\t\t\t\t\t'backup-interface-weight'\r\n\t\t\t\t\t\t\t\t\t\t]\r\n\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t]\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t},\r\n\t\t\t\t\t\t'umbrella-data-center':{\r\n\t\t\t\t\t\t\t'data-center-primary':{\r\n\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t'vipType':'ignore',\r\n\t\t\t\t\t\t\t\t'vipValue':'',\r\n\t\t\t\t\t\t\t\t'vipVariableName':'vpn_umbprimarydc'\r\n\t\t\t\t\t\t\t},\r\n\t\t\t\t\t\t\t'data-center-secondary':{\r\n\t\t\t\t\t\t\t\t'vipObjectType':'object',\r\n\t\t\t\t\t\t\t\t'vipType':'ignore',\r\n\t\t\t\t\t\t\t\t'vipValue':'',\r\n\t\t\t\t\t\t\t\t'vipVariableName':'vpn_umbsecondarydc'\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t],\r\n\t\t\t\t'vipObjectType':'tree',\r\n\t\t\t\t'vipPrimaryKey':[\r\n\t\t\t\t\t'svc-type'\r\n\t\t\t\t]\r\n\t\t\t}\r\n\t\t},\r\n\t\t'factoryDefault':'false'\r\n\t}\r\n\r\n\r\n #pprint.pprint(payload)\r\n response = vmanage_session.post_request('template/feature/', payload)\r\n req=response.json()\r\n\r\n if response.status_code == 200:\r\n d=dict(response.json())\r\n print('Created SIG Credentials feature template with the ID: '+d['templateId'])\r\n return d['templateId']\r\n\r\n else:\r\n print('Failed creating banner template, error: ',response.text)\r\n exit\r\n\t\t\r\ndef list_device_template():\r\n #Retrieve and return device templates list\r\n\r\n print('Retrieving the templates available.')\r\n\r\n response = vmanage_session.get_request('template/device').json()\r\n\r\n items = response['data']\r\n\r\n headers = ['Template Name', 'Device Type', 'Template ID', 'Attached devices', 'Template version']\r\n table = list()\r\n\r\n for item in items:\r\n if item['lastUpdatedBy']=='admin':\r\n tr = [item['templateName'], item['deviceType'], item['templateId'], item['devicesAttached'], item['templateAttached']]\r\n table.append(tr)\r\n try:\r\n print(tabulate.tabulate(table, headers, tablefmt='fancy_grid'))\r\n except UnicodeEncodeError:\r\n print(tabulate.tabulate(table, headers, tablefmt='grid'))\r\n\r\n\r\n\r\n\r\ndef create_device_template():\r\n #create device template with Cisco SDWAN\r\n print('Creating device template based on yaml file details')\r\n with open('device_template_config.yaml') as f:\r\n #print(f)\r\n config = yaml.safe_load(f.read())\r\n #print(config)\r\n\r\n payload = {\r\n 'templateName': config['template_name'],\r\n 'templateDescription': config['template_description'],\r\n 'deviceType': config['device_type'],\r\n 'configType': 'template',\r\n 'policyId': '',\r\n 'factoryDefault': 'false',\r\n 'featureTemplateUidRange': [],\r\n 'generalTemplates': [\r\n {\r\n 'templateId': '02e3cf1e-d826-4152-afc6-f681f1026247',\r\n 'templateType': 'cedge_aaa'\r\n },\r\n {\r\n 'templateId': 'c4b60893-91ee-4341-807e-9889114edeb0',\r\n 'templateType': 'cisco_system'\r\n },\r\n {\r\n 'templateId': '9f516c0a-ecc8-45ac-8f6c-1b7349112229',\r\n 'templateType': 'cisco_omp'\r\n },\r\n {\r\n 'templateId': 'cf144d5e-0829-4cab-a5b2-097ade0ba363',\r\n 'templateType': 'cisco_vpn',\r\n 'subTemplates': [\r\n {\r\n 'templateId': config['sig_tunnel_id'],\r\n 'templateType': 'cisco_vpn_interface'\r\n 
},\r\n {\r\n 'templateId': 'a84fc720-d9f8-4746-8226-0fbce526ae69',\r\n 'templateType': 'cisco_vpn_interface'\r\n }\r\n ]\r\n },\r\n {\r\n 'templateId': '8535dd50-9b77-4b2d-8b36-435127f43a19',\r\n 'templateType': 'cisco_vpn',\r\n 'subTemplates': [\r\n {\r\n 'templateId': 'd3de90df-3005-4a8e-ac90-8927a6c1c3ef',\r\n 'templateType': 'cisco_vpn_interface'\r\n }\r\n ]\r\n },\r\n {\r\n 'templateId': 'e4b1b450-4999-4405-b2d1-9b149cb9441b',\r\n 'templateType': 'cisco_vpn',\r\n 'subTemplates': [\r\n {\r\n 'templateId': '88c1d524-8dcf-412e-899b-28a204e962d3',\r\n 'templateType': 'cisco_ospf'\r\n },\r\n {\r\n 'templateId': '831c9cb1-62d6-4e26-b537-e99355f24fed',\r\n 'templateType': 'cisco_vpn_interface'\r\n },\r\n {\r\n 'templateId': 'fb6f0489-87e3-4eef-8297-d577a7a3c1d5',\r\n 'templateType': 'cisco_vpn_interface'\r\n }\r\n ]\r\n }, \r\n {\r\n 'templateId': config['sig_cred_id'],\r\n 'templateType': 'cisco_sig_credentials'\r\n },\r\n {\r\n 'templateId': config['banner_id'],\r\n 'templateType': 'cisco_banner'\r\n },\r\n {\r\n 'templateId': 'ed600ed2-3a92-4804-bb00-67ae21c3aa07',\r\n 'templateType': 'cisco_smnp'\r\n },\r\n {\r\n 'templateId': 'dbe0d474-81fe-4791-8667-affc33d90289',\r\n 'templateType': 'cisco_logging'\r\n },\r\n {\r\n 'templateId': '178199ac-3e3c-4734-b81c-0af67cfdde25',\r\n 'templateType': 'cisco_bfd'\r\n },\r\n {\r\n 'templateId': '53a0e87f-a276-438b-8966-787ccf434233',\r\n 'templateType': 'cisco_security'\r\n },\r\n {\r\n 'templateId': '1948df12-dc42-4342-a2d1-f4200a6fdf45',\r\n 'templateType': 'cedge_global'\r\n }\r\n ]\r\n }\r\n #pprint.pprint(payload)\r\n response = vmanage_session.post_request('template/device/feature/', payload)\r\n req=response.json()\r\n\r\n if response.status_code == 200:\r\n d=dict(response.json())\r\n #print(d)\r\n print('Created Device template with ID: '+d['templateId'])\r\n return d['templateId']\r\n\r\n else:\r\n print('Failed creating banner template, error: ',response.text)\r\n exit\r\n \r\n\r\ndef list_feature():\r\n #Retrieve and return the NON default feature template\r\n \r\n print('Retrieving the templates available.')\r\n\r\n response = vmanage_session.get_request('template/feature').json()\r\n\r\n items = response['data']\r\n\r\n headers = ['Template Name', 'Model', 'Template Type', 'Template ID']\r\n table = list()\r\n \r\n for item in items:\r\n if item['createdBy']=='admin':\r\n tr = [item['templateName'], item['deviceType'], item['templateType'], item['templateId']]\r\n table.append(tr)\r\n try:\r\n print(tabulate.tabulate(table, headers, tablefmt='fancy_grid'))\r\n except UnicodeEncodeError:\r\n print(tabulate.tabulate(table, headers, tablefmt='grid'))\r\n\r\ndef attach(template, variables):\r\n \"\"\"Attach a template with Cisco SDWAN.\r\n Provide all template parameters and their values as arguments.\r\n Example command:\r\n ./sdwan.py attach --template template-id --variables Site-3-vEdge-Variables.yaml\r\n \r\n print(\"Attempting to attach template.\")\r\n \r\n with open('attach_config.yaml) as f:\r\n config = yaml.safe_load(f.read())\r\n\r\n system_ip = config.get(\"system_ip\")\r\n host_name = config.get(\"host_name\")\r\n template_id = template\r\n\r\n template_variables = {\r\n \"csv-status\":\"complete\",\r\n \"csv-deviceId\": config.get(\"device_id\"),\r\n \"csv-deviceIP\": system_ip,\r\n \"csv-host-name\": host_name,\r\n \"//system/host-name\": config.get(\"system_host_name\"),\r\n \"//system/system-ip\": config.get(\"system_system_ip\"),\r\n \"//system/site-id\": config.get(\"site_id\"),\r\n 
\"/1/vpn_1_if_name/interface/ip/address\": config.get(\"vpn_1_if_ipv4_address\"),\r\n \r\n \"/512/vpn_512_if_name/interface/ip/address\": config.get(\"vpn_512_if_ipv4_address\"),\r\n \"/0/vpn-instance/ip/route/0.0.0.0/0/next-hop/mpls_next_hop/address\": config.get(\"mpls_next_hop\"),\r\n \"/0/vpn-instance/ip/route/0.0.0.0/0/next-hop/public_internet_next_hop/address\": config.get(\"public_internet_next_hop\"),\r\n \"/0/vpn_public_internet_interface/interface/if-name\": config.get(\"vpn_public_internet_interface\"),\r\n \"/0/vpn_public_internet_interface/interface/ip/address\": config.get(\"vpn_public_internet_interface\"),\r\n \"/0/vpn_mpls_interface/interface/if-name\": config.get(\"vpn_mpls_interface\"),\r\n \"/0/vpn_mpls_interface/interface/ip/address\": config.get(\"vpn_mpls_if_ipv4_address\"),\r\n }\r\n\r\n\r\n payload = {\r\n \"deviceTemplateList\":[\r\n {\r\n \"templateId\":template_id, \r\n \"device\":[ template_variables ],\r\n \"isEdited\":\"false\", \r\n \"isMasterEdited\":\"false\" \r\n }\r\n ]\r\n }\r\n\r\n url = base_url + \"/template/device/config/attachfeature\"\r\n\r\n response = requests.post(url=url, data=json.dumps(payload), headers=header, verify=False)\r\n if response.status_code == 200:\r\n attach_template_pushid = response.json()['id']\r\n url = base_url + \"/device/action/status/%s\"%attach_template_pushid\r\n while(1):\r\n template_status_res = requests.get(url,headers=header,verify=False)\r\n if template_status_res.status_code == 200:\r\n template_push_status = template_status_res.json()\r\n if template_push_status['summary']['status'] == \"done\":\r\n if 'Success' in template_push_status['summary']['count']:\r\n print(\"Attached Site 3 vEdge Template\")\r\n elif 'Failure' in template_push_status['summary']['count']:\r\n print(\"Failed to attach Site 3 vEdge Template\")\r\n exit()\r\n break\r\n else: \r\n print(\"\\nFetching template push status failed\")\r\n exit()\r\n\r\n else:\r\n print(\"Failed to attach Site 3 vEdge Template\")\r\n exit()\r\n \"\"\"\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # List starting feature templates\r\n list_feature()\r\n print('*'*80)\r\n \r\n\t# Collect Banner info, read YAML as Dict, add input to Dict, and write to YAML files\r\n input('Press Enter to create a new banner template using the API: ')\r\n tname = input('Enter your template name with no spaces (Example PodXX_Banner_FT): ')\r\n tdesc = input('Enter your template description (Example PodXX Banner Feature Template): ')\r\n tlogin = input('Enter your login banner text: ')\r\n tmotd = input('Enter your motd banner text: ')\r\n with open('banner_config.yaml','r') as f:\r\n data_list = yaml.safe_load(f)\r\n data_list.update({'template_name': tname,'template_description': tdesc,'login_banner': tlogin, 'motd_banner': tmotd})\r\n with open('banner_config.yaml', 'w') as f:\r\n yaml.dump(data_list, f)\r\n bannerID = create_banner_template()\r\n #print('A banner template has been created with an id of: '+bannerID)\r\n print('*'*80)\r\n\r\n\t# Collect SIG Cred info, read YAML as Dict, add input to Dict, and write to YAML files\r\n input(\"Press Enter to create a SIG Credential Feature Template: \")\t\r\n tname = input('Enter your template name with no spaces (Example PodXX_SIG_Cred_FT): ')\r\n tdesc = input('Enter your template description (Example PodXX SIG Credentials Feature Template): ')\r\n with open('SIG_cred_config.yaml','r') as f:\r\n data_list = yaml.safe_load(f)\r\n data_list.update({'template_name': tname,'template_description': tdesc})\r\n with 
open('SIG_cred_config.yaml', 'w') as f:\r\n yaml.dump(data_list, f)\r\n sigCredID = create_SIG_cred_template()\r\n #print('A SIG Credential template has been created with an id of: '+sigCredID)\r\n print('*'*80)\r\n \r\n # Collect SIG Tunnel info, read YAML as Dict, add input to Dict, and write to YAML files\r\n input(\"Press Enter to create a SIG Tunnel Feature Template: \")\r\n tname = input('Enter your template name with no spaces (Example PodXX_SIG_Tunnel_FT): ')\r\n tdesc = input('Enter your template description (Example PodXX SIG Tunnel Feature Template): ')\r\n with open('tunnel_config.yaml','r') as f:\r\n data_list = yaml.safe_load(f)\r\n data_list.update({'template_name': tname,'template_description': tdesc})\r\n with open('tunnel_config.yaml', 'w') as f:\r\n yaml.dump(data_list, f)\r\n sigTunnelID = create_SIG_tunnel_template()\r\n #print('A SIG Tunnel template has been created with an id of: '+sigTunnelID)\r\n print('*'*80)\r\n\r\n # List all non default feature templates\r\n input('Press Enter to see that the 3 new templates have been added: ')\r\n list_feature()\r\n print('*'*80)\r\n\r\n # List all non default device templates\r\n input('Press Enter to view the starting device templates: ')\r\n list_device_template()\r\n print('*'*80)\r\n\r\n # Collect Device Template info, read YAML as Dict, add input to Dict, and write to YAML files\r\n input('Press Enter to create a new device template: ')\r\n dtname = input('Enter your device template name with no spaces (Example PodXX_Device_Template): ')\r\n dtdesc = input('Enter your device template description (Example PodXX Device Template): ')\r\n with open('device_template_config.yaml','r') as f:\r\n data_list = yaml.safe_load(f)\r\n data_list.update({'template_name': dtname,'template_description': dtdesc, 'banner_id': bannerID, 'sig_cred_id': sigCredID, 'sig_tunnel_id': sigTunnelID})\r\n with open('device_template_config.yaml', 'w') as f:\r\n yaml.dump(data_list, f)\r\n create_device_template()\r\n print('*'*80)\r\n\r\n # List all non default device templates\r\n list_device_template()\r\n print('*'*80)\r\n\r\n # List Devices in the Lab\r\n input('Press Enter to view devices: ')\r\n list_devices()\r\n print('*'*80)\r\n\r\n print('End of Script')\r\n print('*'*80)\r\n","sub_path":"Case3/case3.py","file_name":"case3.py","file_ext":"py","file_size_in_byte":31533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"350458934","text":"t=int(input())\nfor k in range(0,t):\n n=int(input())\n a=list(map(int,input().split()))\n maxn=0\n for i in range(0,n):\n wide=1\n for j in range(i+1,n):\n if a[j]<a[i]:\n break\n wide+=1\n if a[i]*wide>maxn:\n maxn=a[i]*wide\n print(maxn)","sub_path":"Code/CodeRecords/2473/60670/257759.py","file_name":"257759.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"632552427","text":"from textrank.TextRankforKeyword import TextRankforKeyword\nfrom textrank.TextRankforSentence import TextRankforSentence\n\n\ndef load_docs(path):\n with open(path, mode=\"r\", encoding=\"utf8\") as f:\n doc = f.read()\n return doc\n\n\ndef main():\n\n path = \"04.txt\"\n doc = load_docs(path)\n\n tr4w = TextRankforKeyword(stop_words_file='./stopwords.txt')\n\n # In py2, text must be a utf8-encoded str or a unicode object; in py3 it must be a utf8-encoded bytes or str object\n tr4w.analyze(text=doc, window=5)\n\n print(\"\")\n print('Keywords:')\n for item in tr4w.get_keywords(10, word_min_len=1):\n print(item.word, item.weight)\n\n print()\n print('Key phrases:')\n for phrase in 
tr4w.get_keyphrases(keywords_num=10, min_occur_num=1):\n print(phrase)\n\n tr4s = TextRankforSentence(stop_words_file='./stopwords.txt')\n tr4s.analyze(text=doc, lower=True, source='all_filters')\n\n print()\n print('Summary:')\n for item in tr4s.get_key_sentences(num=5):\n print(item.index, item.sentence, item.weight)\n\nif __name__ == '__main__':\n main()","sub_path":"myTextRank/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"81984620","text":"'''\nSolution:\n1. If a digit occurs, add the value to the current value.\n2. If an operator is found, push the current value to the stack and refresh the current value.\n3. At the end, add all values in the stack.\n\nTime Complexity: O(n) | Space Complexity: O(n)\n\n--- Passed all testcases successfully on Leetcode\n'''\n\n\nclass Calculator:\n def calculate(self, s: str) -> int:\n \n # edge case check\n if (s is None or len(s) == 0):\n return 0\n \n # initializations\n digits = '0123456789'\n operators = '+-*/'\n \n stack = []\n cursor = 0\n sign = '+'\n value = 0\n \n # iterate while the cursor is within the string\n while (cursor < len(s)):\n \n # if a digit occurred\n if (s[cursor] in digits):\n value = (value * 10) + int(s[cursor])\n \n # if an operator occurred or this is the last element in the string\n if ( (s[cursor] in operators) or cursor == len(s)-1):\n \n # if sign is + or -, add the value to the stack with the sign\n if (sign == '+'):\n stack.append(value)\n value = 0\n elif (sign == '-'):\n stack.append(-value)\n value = 0\n # if * or /, pop the top value, perform computation with the current value\n # and push the result to the stack\n elif (sign == '*'):\n tempValue = stack.pop()\n stack.append(tempValue * value)\n value = 0\n elif (sign == '/'):\n tempValue = stack.pop()\n stack.append(int(tempValue / value))\n value = 0\n \n # update the sign\n sign = s[cursor]\n \n cursor += 1\n \n result = 0\n\n # add all elements present inside the stack\n while (len(stack) > 0):\n result += (stack.pop())\n \n # return the result\n return result","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"605691080","text":"import torch\nimport torch.nn as nn\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.autograd import Variable\n\nimport dataset\nimport dbapi\nimport sqlalchemy\nimport pymysql\n\n\n# AWS CREDENTIALS\nHOST = \"hedgedb.c288vca6ravj.us-east-2.rds.amazonaws.com\"\nPORT = 3306\nDB_NAME = \"scores_timeseries\"\nDB_USER = \"hedgeADMIN\"\nDB_PW = \"bluefootedboobie123\"\n\n# connect to Dataset and AWS to pull data\nscores_db = dataset.connect(\"sqlite:///scorebase.db\")\nAWS_RDS = dataset.connect(\"mysql+pymysql://{}:{}@{}/{}\".format\\\n(DB_USER, DB_PW, HOST, DB_NAME), engine_kwargs = {'pool_recycle': 3600})\n\n\nin_size = 3 # twitter_sent, headline_sent, wiki_views\nout_size = 1 # composite output\nnum_epochs = 800\nlearning_rate = 0.002\n\n\n# Data set\n\n#x_train = np.array([[1.564],[2.11],[3.3],[5.4]], dtype=np.float32)\nx_train = np.array([\n [450.,80.,14752.],\n [300.,88.,11000.],\n [260.,91.,9000.],\n [496.,98.,11000.],\n [200.,63.,12000.]],dtype=np.float32)\n\n#y_train = np.array([[8.0],[19.0],[25.0],[34.45]], dtype= np.float32)\ny_train = 
np.array([[3.2],[1.8],[0.2],[1.0],[0.5]],dtype=np.float32)\n\nprint('x_train:\\n',x_train)\nprint('y_train:\\n',y_train)\n\n# convert the numpy training data to torch tensors once up front\nx_tensor, y_tensor = torch.from_numpy(x_train), torch.from_numpy(y_train)\n\nclass LinearRegression(nn.Module):\n\n def __init__(self):\n super(LinearRegression,self).__init__()\n self.linear = nn.Linear(3, 1, bias=True)\n\n\n def forward(self,x):\n out = self.linear(x) # forward propagation using linear model\n return out\n\nmodel = LinearRegression()\n\n# Loss and Optimizer\ncriterion = nn.SmoothL1Loss() # using Smooth L1 loss\noptimizer = torch.optim.SGD(model.parameters(),lr=learning_rate, weight_decay=1) # using Stochastic Gradient Descent\n\n# train the Model\nfor epoch in range(num_epochs):\n\n # wrap the training tensors in autograd Variables\n inputs = Variable(x_tensor)\n target = Variable(y_tensor)\n\n # optionally move the data to the GPU:\n # inputs, target = inputs.cuda(), target.cuda()\n\n #forward\n outputs = model(inputs) # generate output from model with all input vectors\n loss = criterion(outputs,target) # loss function\n \n #backwards\n optimizer.zero_grad() # zero the gradients\n loss.backward() # backward propagation\n optimizer.step() # 1-step optimization (gradient descent)\n \n if(epoch+1) % 1 ==0:\n print('epoch [%d/%d], Loss: %.4f' % (epoch +1, num_epochs, loss.item()))\n \n \nmodel.eval()\npredicted = model(Variable(x_tensor)).data.numpy()\n \nplt.plot(y_train,'ro',label='Original Data')\nplt.plot(predicted,label='Fitted Line')\nplt.legend()\nplt.show()","sub_path":"Stale/hedge_neural_net.py","file_name":"hedge_neural_net.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"434782448","text":"import copy\n\nimport numpy as np\nimport pandas as pd\nfrom graphtorch.wiring.utils import create_empty_connection_dataframe\nfrom graphtorch.wiring.parallel import connect_parallel_connections\nfrom graphtorch.wiring import all_to_all\nfrom graphtorch.wiring.utils import split_nodes_by_depth\nfrom graphtorch.wiring.utils import sort_nodes\nfrom graphtorch.wiring.utils import maximum_num_of_connections\nfrom graphtorch.wiring.utils import in_same_depth\n\n\ndef connect_random_connections(\n empty_connection,\n in_dim,\n out_dim,\n split_input_layer,\n split_output_layer,\n layer_sparsity,\n network_sparsity,\n seed,\n max_depth\n):\n assert layer_sparsity <= 1 and layer_sparsity >= 0\n #\n # Input sanity check\n #\n assert empty_connection.shape[0] == empty_connection.shape[1]\n #\n # Create parallel connection object\n #\n random_connection = empty_connection\n #\n # Sample num of depth\n #\n np.random.seed(seed)\n num_nodes = len(random_connection.filter(regex=\"H:\").columns.tolist())\n if max_depth is not None : \n num_depths = np.random.randint(1, min(max_depth, num_nodes))\n else : \n num_depths = np.random.randint(1, num_nodes)\n #\n # Assign node for each depth\n #\n hidden_nodes = empty_connection.filter(regex=\"H:\").columns.tolist()\n all_nodes = empty_connection.columns.tolist()\n num_nodes_used = 0\n num_nodes_in_depth = []\n # Assign num of nodes per depth\n for idx_depth in range(num_depths - 1):\n num_nodes_min = 1\n num_nodes_max = num_nodes - num_nodes_used - (num_depths - (idx_depth))\n num_nodes_max = max(num_nodes_min, num_nodes_max)\n if num_nodes_min == num_nodes_max : \n num_nodes_to_use = 1\n else : \n num_nodes_to_use = np.random.randint(num_nodes_min, num_nodes_max)\n 
num_nodes_used += num_nodes_to_use\n num_nodes_in_depth.append(num_nodes_to_use)\n num_nodes_in_depth.append(num_nodes - num_nodes_used)\n #\n # Assign actual name of nodes by given num_nodes_in_depth\n #\n nodes_per_depth = split_nodes_by_depth(hidden_nodes, num_nodes_in_depth)\n #\n # Generate essential wires\n #\n num_total_connections = 0\n # Input layer\n input_nodes = random_connection.filter(regex=\"I:\").columns.tolist()\n all_other_nodes = list(set(all_nodes) - set(input_nodes))\n all_other_nodes = sort_nodes(all_other_nodes)\n for input_node in input_nodes:\n node_to = np.random.choice(all_other_nodes)\n if random_connection.isna().loc[input_node, node_to]:\n random_connection.loc[input_node, node_to] = 1 - layer_sparsity\n num_total_connections += 1\n # Output layer\n output_nodes = random_connection.filter(regex=\"O:\").columns.tolist()\n all_other_nodes = list(set(all_nodes) - set(output_nodes))\n all_other_nodes = sort_nodes(all_other_nodes)\n for output_node in output_nodes:\n node_from = np.random.choice(all_other_nodes)\n if random_connection.isna().loc[node_from, output_node]:\n random_connection.loc[node_from, output_node] = 1 - layer_sparsity\n num_total_connections += 1\n # Hidden layers\n for idx_depth, nodes_in_depth in zip(\n nodes_per_depth.keys(), nodes_per_depth.values()\n ):\n all_nodes_before = all_nodes[: all_nodes.index(nodes_in_depth[0])]\n all_nodes_after = all_nodes[all_nodes.index(nodes_in_depth[-1]) + 1 :]\n for node in nodes_in_depth:\n # Left side\n node_from = np.random.choice(all_nodes_before)\n if random_connection.isna().loc[node_from, node]:\n random_connection.loc[node_from, node] = 1 - layer_sparsity\n num_total_connections += 1\n # Right side\n node_to = np.random.choice(all_nodes_after)\n if random_connection.isna().loc[node, node_to]:\n random_connection.loc[node, node_to] = 1 - layer_sparsity\n num_total_connections += 1\n #\n # Generate additional wires\n #\n num_maximum_connections = maximum_num_of_connections(\n in_dim,\n out_dim,\n split_input_layer,\n split_output_layer,\n parallel=True,\n num_nodes_in_depth=num_nodes_in_depth,\n )\n current_network_sparsity = 1 - (num_total_connections / num_maximum_connections)\n while current_network_sparsity > network_sparsity:\n node_from = np.random.choice(all_nodes)\n all_other_nodes_to = all_nodes[all_nodes.index(node_from) + 1 :]\n if len(all_other_nodes_to) != 0:\n node_to = np.random.choice(all_other_nodes_to)\n if not in_same_depth(\n node1=node_from, node2=node_to, nodes_per_depth=nodes_per_depth\n ):\n if random_connection.isna().loc[node_from, node_to]:\n random_connection.loc[node_from, node_to] = 1 - layer_sparsity\n num_total_connections += 1\n current_network_sparsity = 1 - (\n num_total_connections / num_maximum_connections\n )\n\n return random_connection, nodes_per_depth, num_total_connections\n\n\ndef create_random_connections(\n node_dims,\n in_dim,\n out_dim,\n split_input_layer=False,\n split_output_layer=False,\n layer_sparsity=0,\n network_sparsity=0,\n seed=0,\n max_depth=None\n):\n num_nodes = len(node_dims)\n empty_connection = create_empty_connection_dataframe(\n node_dims, in_dim, out_dim, split_input_layer, split_output_layer,\n )\n (\n random_connection,\n nodes_per_depth,\n num_total_connections,\n ) = connect_random_connections(\n empty_connection,\n in_dim,\n out_dim,\n split_input_layer,\n split_output_layer,\n layer_sparsity,\n network_sparsity,\n seed,\n max_depth\n )\n\n dimension = []\n dimension += [1 for x in range(in_dim)] if split_input_layer else 
[in_dim]\n dimension += node_dims\n dimension += [1 for x in range(out_dim)] if split_output_layer else [out_dim]\n\n connections = {\n \"connection\": random_connection,\n \"dimension\": dimension,\n \"nodes_per_depth\": nodes_per_depth,\n \"num_total_connections\": num_total_connections,\n }\n\n return connections\n","sub_path":"graphtorch/wiring/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":6308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"451018513","text":"from datetime import datetime, timedelta, timezone\nfrom pprint import pformat\nfrom time import sleep\n\nfrom actstream import action\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.db import transaction\nfrom django.db.models import Model\nfrom django.utils.functional import cached_property\nfrom django_tqdm import BaseCommand\nfrom opensearchpy import OpenSearch, RequestsHttpConnection\nfrom opensearch_dsl.response import Hit\nfrom phonenumber_field.phonenumber import PhoneNumber\nfrom phonenumbers import NumberParseException\nfrom requests_hawk import HawkAuth\n\nfrom server.apps.main.models import KEY_TYPE, Commit, Consent, LegalBasis\nfrom server.apps.poller.api_client.activity import FormsApi\nfrom server.apps.poller.models import ActivityStreamType\n\n\nclass Command(BaseCommand):\n help = \"\"\"\n Start polling for forms api submissions in activity stream.\n\n e.g. ./manage.py poll_formsapi\n \"\"\"\n\n def add_arguments(self, parser):\n # Named (optional) arguments\n parser.add_argument(\n \"--forever\", action=\"store_true\", help=\"Run in a loop forever\",\n )\n\n parser.add_argument(\n \"--sleep-time\",\n action=\"store\",\n type=int,\n help=\"How long to sleep for (seconds), default: 60\",\n default=60,\n )\n\n @cached_property\n def email_consent(self) -> Consent:\n email_consent, _ = Consent.objects.get_or_create(name=\"email_marketing\")\n return email_consent\n\n @cached_property\n def phone_consent(self) -> Consent:\n phone_consent, _ = Consent.objects.get_or_create(name=\"phone_marketing\")\n return phone_consent\n\n def get_client(self) -> FormsApi:\n es_client = OpenSearch(\n http_auth=HawkAuth(\n id=settings.ACTIVITY_STREAM_ID, key=settings.ACTIVITY_STREAM_KEY\n ),\n connection_class=RequestsHttpConnection,\n port=443,\n use_ssl=True,\n host=settings.ACTIVITY_STREAM_URL.host,\n url_prefix=str(settings.ACTIVITY_STREAM_URL.path),\n )\n\n return FormsApi(es_client=es_client)\n\n def get_activity_instance(self, name: str) -> ActivityStreamType:\n obj, created = ActivityStreamType.objects.get_or_create(name=name)\n if created:\n print(f\"First run for {name}. 
Creating model {obj}\")\n return obj\n\n @transaction.atomic()\n def update_consent(self, object_data, meta) -> None:\n email_address = object_data.get(\"email_address\", object_data.get(\"email\"))\n email_contact_consent = object_data.get(\"email_contact_consent\") or \"consents_to_email_contact\" in object_data.get(\"contact_consent\", [])\n\n phone_number = object_data.get(\"phone_number\")\n phone_number_country = object_data.get(\"country\")\n phone_consent = object_data.get(\"telephone_contact_consent\")\n\n commit = Commit(extra=meta)\n commit.source = meta[\"url\"] or '' # Not all forms API submissions have an URL\n commit.save()\n\n self._update_email_consent(\n commit,\n email_address,\n email_contact_consent,\n datetime.fromisoformat(meta[\"published\"]).replace(tzinfo=timezone.utc),\n )\n\n self._update_phone_consent(\n commit, phone_consent, phone_number, phone_number_country\n )\n\n def _update_phone_consent(\n self, commit, phone_consent, phone_number, phone_number_country\n ) -> None:\n if phone_number:\n try:\n phone_number_parsed: PhoneNumber = PhoneNumber.from_string(\n phone_number, region=phone_number_country\n )\n phone_number = phone_number_parsed.as_e164\n except NumberParseException:\n pass\n\n obj = LegalBasis(\n phone=phone_number[:128], commit=commit, key_type=KEY_TYPE.PHONE,\n )\n obj.save()\n if phone_consent:\n obj.consents.add(self.phone_consent)\n else:\n obj.consents.remove(self.phone_consent)\n\n self._send_action(obj)\n\n def _send_action(self, instance: Model) -> None:\n User = get_user_model()\n directoryforms_user = User.objects.get(username=\"directoryforms\")\n action_kwargs = {\n \"sender\": directoryforms_user,\n \"action_object\": instance,\n \"verb\": \"Create\",\n }\n action.send(**action_kwargs)\n\n def _update_email_consent(\n self, commit, email_address, email_contact_consent, hit_modified_at\n ) -> None:\n if email_address:\n obj: LegalBasis = LegalBasis(\n email=email_address,\n commit=commit,\n key_type=KEY_TYPE.EMAIL,\n modified_at=hit_modified_at,\n )\n obj.save()\n if email_contact_consent:\n obj.consents.add(self.email_consent)\n else:\n obj.consents.remove(self.email_consent)\n\n self._send_action(obj)\n\n def run(self, *args, **options) -> None:\n client = self.get_client()\n\n obj = self.get_activity_instance(client.name)\n results = client.get_documents(obj.search_after)\n total_hits = results.hits.total.value\n with self.tqdm(total=total_hits) as progress_bar:\n while len(results.hits):\n\n last_hit: Hit\n for hit in results:\n if client.should_process(hit):\n self.write(pformat(hit.to_dict()))\n object_data = client.parse_object_data(hit)\n meta = client.parse_object_meta(hit)\n self.update_consent(object_data, meta)\n progress_bar.update(1)\n\n obj.last_document_timestamp, obj.last_document_id = results.to_dict()[\n \"hits\"\n ][\"hits\"][-1][\"sort\"]\n obj.save()\n\n results = client.get_documents(obj.search_after)\n\n def handle(self, *args, **options):\n run_forever = options.pop(\"forever\")\n sleep_time = options.pop(\"sleep_time\")\n\n if run_forever:\n while True:\n self.write(\"Polling activity stream\")\n self.run(args, options)\n self.write(f\"sleeping until {datetime.now() + timedelta(seconds=60)}\")\n sleep(sleep_time)\n else:\n self.run(args, options)\n","sub_path":"server/apps/poller/management/commands/poll_formsapi.py","file_name":"poll_formsapi.py","file_ext":"py","file_size_in_byte":6530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"234811728","text":"import numpy as np\r\nimport copy\r\n\r\n\r\nfrom constant import *\r\nfrom functools import reduce\r\n\r\ndef softmax(x):\r\n probs = np.exp(x - np.max(x))\r\n probs /= np.sum(probs)\r\n return probs\r\n\r\n\r\nclass TreeNode(object):\r\n \"\"\"\r\n \"\"\"\r\n\r\n def __init__(self, parent, prior_p, state, action):\r\n self._parent = parent\r\n self._children = {} #\r\n self._n_visits = 0\r\n self._state = state\r\n self._Q = 0\r\n self._u = 0\r\n self._P = prior_p\r\n self._action = action\r\n # self._game = game\r\n\r\n def expand(self, action_priors, is_selfplay):\r\n duplicated_node = False\r\n parent_node = None\r\n parent_state = None\r\n\r\n\r\n action_priors = list(action_priors)\r\n #action, prob = zip(*action_priors)\r\n #prob = np.asarray(prob)\r\n noise = np.random.dirichlet(0.3 * np.ones(len(action_priors)))\r\n #prob = prob * 0.8 + noise * 0.2\r\n\r\n for i, (action, prob) in enumerate(action_priors):\r\n \"\"\"\r\n if action < 12:\r\n\r\n # Code for restrict dummy expand\r\n\r\n duplicated_node = False\r\n\r\n # copy game - step action - get state after step(action) end\r\n c_game = copy.deepcopy(game)\r\n c_game.step(action)\r\n next_state = c_game.state()\r\n\r\n # if 'self' is not root node\r\n if self._parent is not None:\r\n parent_node = self._parent # get parent node\r\n parent_state = parent_node._state # get parent node state\r\n\r\n # Compare all states in nodes and next state\r\n while parent_node is not None:\r\n if np.array_equal(parent_state, next_state):\r\n duplicated_node = True\r\n break\r\n else:\r\n # get parent-parent node and parent-parent node state\r\n parent_node = parent_node._parent\r\n if parent_node is not None:\r\n parent_state = parent_node._state\r\n \r\n if not duplicated_node and action not in self._children:\r\n self._children[action] = TreeNode(self, prob, next_state, action)\r\n \"\"\"\r\n\r\n if self._parent is None:\r\n prob = 0.8 * prob + 0.2 * noise[i]\r\n\r\n if action not in self._children:\r\n self._children[action] = TreeNode(self, prob, None, action)\r\n\r\n def select(self, c_puct):\r\n \"\"\"\r\n \"\"\"\r\n\r\n #if np.random.random_sample() < 0.7:\r\n # return reduce(lambda x, y: x if (x[0] < 2 and y[0] >= 2) else x if ((x[0] < 2 and y[0] < 2) and (x[1].get_value(c_puct) > y[1].get_value(c_puct))) else x if (x[1].get_value(c_puct) > y[1].get_value(c_puct)) else y, self._children.items())\r\n\r\n return max(self._children.items(), key=lambda act_node: act_node[1].get_value(c_puct))\r\n\r\n def update(self, leaf_value):\r\n \"\"\"\r\n \"\"\"\r\n self._n_visits += 1\r\n self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits\r\n\r\n def update_recursive(self, reward):\r\n\r\n\r\n if self._parent:\r\n self._parent.update_recursive(-reward)\r\n\r\n self.update(reward)\r\n\r\n def get_value(self, c_puct):\r\n \"\"\"\r\n \"\"\"\r\n self._u = c_puct * self._P * np.sqrt(self._parent._n_visits) / (1 + self._n_visits)\r\n\r\n return self._Q + self._u\r\n\r\n def is_leaf(self):\r\n \"\"\"\r\n \"\"\"\r\n return self._children == {}\r\n\r\n def is_root(self):\r\n return self._parent is None\r\n\r\n def get_parent(self):\r\n return self._parent\r\n\r\n\r\nclass MCTS(object):\r\n \"\"\"\r\n \"\"\"\r\n\r\n def __init__(self, policy_value_fn, c_puct=5, n_playout=1800):\r\n \"\"\"\r\n \"\"\"\r\n self._root = TreeNode(None, 1.0, None, None)\r\n self._policy = policy_value_fn\r\n self._c_puct = c_puct\r\n self._n_playout = n_playout\r\n\r\n # Fix : get current_player param info when the first simulation started.\r\n 
def _playout(self, game, is_selfplay):\r\n \"\"\"\r\n \"\"\"\r\n node = self._root\r\n while (1):\r\n if node.is_leaf():\r\n break\r\n\r\n action, node = node.select(self._c_puct)\r\n game.step(action) #\r\n # state = game.state()\r\n\r\n action_probs, leaf_value = self._policy(game)\r\n end, winner = game.has_a_winner()\r\n\r\n\r\n if not end:\r\n # Incomplete code to make the pawn avoid dead-end sections.\r\n \"\"\"\r\n if np.sum(game.actions()[:4]) <= 1:\r\n leaf_value = -1.0 if game.get_current_player == current_player else 1.0\r\n else:\r\n \"\"\"\r\n node.expand(action_probs, is_selfplay)\r\n else:\r\n leaf_value = 1.0 if winner == game.get_current_player() else -1.0 # Fix bug that all winners are current player\r\n # print(\"call update\")\r\n\r\n node.update_recursive(-leaf_value)\r\n\r\n def get_move_probs(self, game, temp=1e-3, time_step=0, is_selfplay=0):\r\n \"\"\"\r\n \"\"\"\r\n for n in range(self._n_playout):\r\n game_copy = copy.deepcopy(game)\r\n # state = game.state()\r\n # state_copy = copy.deepcopy(state)\r\n self._playout(game_copy, is_selfplay)\r\n\r\n act_visits = [(act, node._n_visits) for act, node in self._root._children.items()]\r\n acts, visits = zip(*act_visits)\r\n act_probs = softmax(1.0 / temp * np.log(np.array(visits) + 1e-10))\r\n\r\n visits = np.array(visits)\r\n\r\n \"\"\"\r\n if time_step < TAU_THRES:\r\n act_probs = visits / visits.sum()\r\n else:\r\n act_probs = np.zeros(len(visits))\r\n max_idx = np.argwhere(visits == visits.max())\r\n\r\n action_index = max_idx[np.random.choice(len(max_idx))]\r\n act_probs[action_index] = 1\r\n \"\"\"\r\n\r\n # q_vals = [node._Q for act, node in self._root._children.items()]\r\n # print(\"-\" * 30)\r\n # print(\"q_vals : \", q_vals)\r\n # print(\"-\" * 30)\r\n return acts, act_probs\r\n\r\n def update_with_move(self, last_move, state):\r\n if last_move in self._root._children:\r\n self._root = self._root._children[last_move]\r\n self._root._parent = None\r\n else:\r\n self._root = TreeNode(None, 1.0, state, last_move)\r\n\r\n def __str__(self):\r\n return \"MCTS\"\r\n\r\n\r\nclass MCTSPlayer(object):\r\n #\r\n def __init__(self, policy_value_function, c_puct=5, n_playout=2000, is_selfplay=1):\r\n self.mcts = MCTS(policy_value_function, c_puct, n_playout)\r\n self._is_selfplay = is_selfplay\r\n\r\n #\r\n def set_player_ind(self, p):\r\n self.player = p\r\n\r\n #\r\n def reset_player(self):\r\n self.mcts.update_with_move(-1, None)\r\n\r\n # Choose an action during the play\r\n def choose_action(self, game, temp=1e-3, return_prob=0, time_step=0):\r\n sensible_moves = game.actions() # get all legal moves\r\n move_probs = np.zeros(12 + (BOARD_SIZE - 1) ** 2 * 2) # move probabilities, output by the neural network\r\n\r\n if len(sensible_moves) > 0: # when the board is not exhausted\r\n acts, probs = self.mcts.get_move_probs(game, temp, time_step, self._is_selfplay) # get the moves and their corresponding probabilities\r\n move_probs[list(acts)] = probs # copy the probabilities into the move_probs list\r\n state = game.state()\r\n\r\n if self._is_selfplay:\r\n # probs = 0.8 * probs + 0.2 * np.random.dirichlet(0.3 * np.ones(len(probs)))\r\n # move = acts[np.argmax(probs)]\r\n move = np.random.choice(acts, p=probs)\r\n self.mcts.update_with_move(move, state) # update the root node and reuse the subtree\r\n else:\r\n move = acts[np.argmax(probs)]\r\n # move = np.random.choice(acts, p=probs)\r\n self.mcts.update_with_move(-1, state)\r\n\r\n if return_prob:\r\n return move, move_probs\r\n else:\r\n return move\r\n else:\r\n print(\"WARNING: the board is full\")\r\n\r\n def __str__(self):\r\n return \"MCTS 
{}\".format(self.player)\r\n","sub_path":"mcts.py","file_name":"mcts.py","file_ext":"py","file_size_in_byte":8244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"289199926","text":"#! python3\n\n# py.exe mcb.pyw save - saves clipboard to keyword\n# py.exe mcb.pyw - loads keyword to clipboard\n# py.exe mcb.pyw list - loads all keywords to clipboard\n\n# EXPLAINER FOR MYSELF\n# with sys.argv, the filename (mcb.pyw) is argv[0], so sys.argv == 3\n# because we have the filename, save, and the keywoard,\n# which are [0], [1], and [2], which are 3 total arguments.\n\n# the other two usage situation -- and list --\n# fall under sys.argv == 2, because we only have two arguments:\n# the filename and or list. filename is always [0]\n# and then or list are at [1], which is a total of\n# two arguments\n\nimport shelve, pyperclip, sys\nmcbShelf = shelve.open('mcb')\n\nif len(sys.argv) == 3 and sys.argv[1].lower() == 'save':\n mcbShelf[sys.argv[2]] = pyperclip.paste()\nif len(sys.argv) == 3 and sys.argv[1].lower() == 'delete':\n del mcbShelf[sys.argv[2]]\nelif len(sys.argv) == 2:\n if sys.argv[1].lower() == 'list':\n mcbShelfValueList = list(mcbShelf.values())\n if mcbShelfValueList == None:\n sys.exit()\n else:\n for i in range(len(mcbShelfValueList)):\n tempValueStr = f\"{mcbShelfValueList[i]}\\n\"\n if i == 0:\n totalString = ''\n totalString = totalString + tempValueStr\n else:\n totalString = totalString + tempValueStr\n pyperclip.copy(totalString)\n elif sys.argv[1] in mcbShelf:\n pyperclip.copy(mcbShelf[sys.argv[1]])\n\nmcbShelf.close()\n","sub_path":"mcb.pyw","file_name":"mcb.pyw","file_ext":"pyw","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"449120477","text":"import os\nimport math\n#Import the necessary packages\n\n\ndef sim_int(p,n,r): \n si = p*n*r/100\n print(\"The Simple Interest is\",si)\n print(dir())\n\ndef ceil_floor(a,b):\n \n print(math.ceil(eval(a)))\n print(math.floor(eval(b)))\n\nfrom math import sqrt\ndef lists():\n lst=[]\n for i in range(5):\n lst.append(round(sqrt(int(input())), 2))\n print(lst)\n print(type(lst[-1]))\n \n# Driver code\np= int(input())\nn= int(input())\nr= int(input()) \nsim_int(p,n,r)\n\n\na=input()\nb=input()\nceil_floor(a,b)\n\nlists()\n","sub_path":"tryout/basic/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"494201723","text":"import csv\ndef ejemplo(a,b,c):\n return [c,b,a]\n\ndef buscarTabla(tabla,caracter,estadoActual):\n a=0\n b=0\n for i in range(len(tabla[0])):\n if(caracter==tabla[0][i]):\n a=i\n for i in range(len(tabla)):\n if(estadoActual==tabla[i][0]):\n b=i\n return tabla[b][a].split(\",\")\n\ndef EjecucionAutomata(cadena,tabla,estadoInicial,estadosFinales):\n estadoActual=estadoInicial\n cabezal=0\n while((not estadoActual in estadosFinales) and (not estadoActual==\"e\")):\n [estadoActual,escribir,movimiento]=buscarTabla(tabla,cadena[cabezal],estadoActual)\n ##Acciones de mi cinta\n cadena=cadena[:cabezal]+escribir+cadena[cabezal+1:]\n if movimiento==\"R\":\n cabezal+=1\n else: \n if (movimiento==\"L\"):\n cabezal-=1\n if(cabezal<0):\n cadena=\"#\"+cadena\n cabezal+=1\n if(cabezal>len(cadena)-1):\n cadena+=\"#\"\n \n if (estadoActual in estadosFinales):\n print(\"Cadena Aceptada\") \n print(\"Resultado \"+cadena.replace(\"#\",\"\"))\n \n else:\n print(\"Cadena Inválida\")\n \ndef 
cargarMaquina(direccion):\n estadoInicial=None\n estadosFinales=[]\n tabla=[]\n with open(direccion, newline='') as File: \n reader = csv.reader(File)\n i = 0\n for row in reader:\n if (i==0):\n estadoInicial=row[0]\n elif (i==1):\n for elemento in row:\n if(not elemento == ''):\n estadosFinales.append(elemento)\n else:\n filaTabla=[]\n for elemento in row:\n if(not elemento == ''):\n elemento=elemento.replace(\";\", \",\")\n filaTabla.append(elemento)\n tabla.append(filaTabla)\n \n i+=1\n return tabla,estadoInicial,estadosFinales\n\ntabla=[[\"e\", \"0\", \"1\", \"#\"],\n [\"q1\",\"q1,1,R\",\"q1,0,R\",\"q2,#,L\"],\n [\"q2\",\"e,#,#\",\"e,#,L\"]]\n\n","sub_path":"MaquinaTuring.py","file_name":"MaquinaTuring.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"379355995","text":"from datetime import datetime\nfrom flask import make_response,abort,request\nfrom config import db,basedir\nfrom flask_restful import reqparse, abort, Api, Resource\nimport os\nimport logging, logging.config, yaml\nimport base64\nfrom os.path import expanduser\nfrom datetime import date, datetime\nfrom scan_aadhar import qr_scan\nfrom face_detect import detect_faces\nfrom aadharvision import detect_text\n\nhome = expanduser('~')\n#print(\"home////////////\",home)\ndate = str(date.today())\n\n\n\n\n\nCONFIG_PATH = os.path.join(basedir,'loggeryaml/aadharlogger.yaml')\nlogging.config.dictConfig(yaml.load(open(CONFIG_PATH)))\nlogger = logging.getLogger('post_aadhar')\n\n\n\nclass ScanAadhar(Resource):\n\n def __init__(self):\n pass\n\n\n def post(self):\n try:\n file = request.form\n base=file['aadhar_image']\n imgdata = base64.b64decode(base)\n filename = home +'/'+'aadhar.jpeg' # I assume you have a way of picking unique filenames\n with open(filename, 'wb') as f:\n f.write(imgdata)\n #image = request.files['aadhar_image']\n details = None\n # try reading the details from the Aadhaar QR code first\n try:\n details = qr_scan(filename)\n print(\"details:\", details)\n except Exception:\n print(\"qr scan failed\")\n\n # fall back to OCR text detection only if the QR scan found nothing\n if details is None:\n try:\n details = detect_text(base)\n print(\"details:\", details)\n except Exception:\n print(\"text detection failed\")\n image_string = ' '\n try:\n # detect and crop the face from the uploaded image\n face = detect_faces(filename)\n os.remove(filename)\n with open(face, 'rb') as image:\n image_string = base64.b64encode(image.read()).decode()\n faceimage_size = ('{:,.0f}'.format(os.path.getsize(face)/float(1<<10))+\" KB\")\n os.remove(face)\n except Exception:\n print(\"face not detected\")\n\n\n #print(\"sdfd:\",image_string)\n\n if details is None:\n logger.warning(\"unable to extract the details from qrcode\")\n return ({\"success\":False,\"message\":\"unable to read the image, please enter the details manually\"})\n elif len(details)>0:\n details['face']=image_string\n logger.info(\"Data successfully extracted from qr code\")\n return ({\"success\":True,\"aadhar_details\":details})\n\n #image_string = base64.b64encode(image.read()).decode()\n\n except IndexError as e:\n logger.warning(\"unable to extract the details from qrcode\")\n return ({\"message\":\"unable to read the image, please enter the details manually\",\"success\":False})\n except Exception as e:\n logger.warning(\"unable to extract the details from qrcode\")\n return ({\"message\":\"unable to read the image, please enter the details 
manually\",\"success\":False})\n","sub_path":"scanocr/app/aadhar_scan/post_aadhar.py","file_name":"post_aadhar.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"571496802","text":"#!/usr/bin/env python\n\"\"\"\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n You should have received a copy of the GNU General Public License\n along with this program. If not, see [http://www.gnu.org/licenses/].\n Inspiration: https://github.com/studywolf/blog/tree/master/RL\n\"\"\"\nimport random\nimport collections\nfrom collections import OrderedDict\n\n\nclass QL:\n def __init__(self, epsilon, seed, alpha=0.1, gamma=0.9):\n self.q = {}\n self.epsilon = epsilon\n self.alpha = alpha\n self.gamma = gamma\n self.seed = seed\n self.exploration = False\n #print \"\\n\\ne=%f, seed=%s, alpha=%f, gamma=%f\" % (epsilon, seed, alpha, gamma)\n\n def getQ(self, state, action):\n return self.q.get((state, action), 0.0)\n #return self.q.get((state, action), 1.0)\n\n def update(self, state, action, reward, value):\n oldv = self.q.get((state, action), None)\n if oldv is None:\n #print \"utility(%r,%r) = %f\" %(state, action, reward)\n self.q[(state, action)] = reward\n else:\n self.q[(state, action)] = oldv + self.alpha * (value - oldv)\n\n def chooseAction(self, state, actions):\n #random.seed(self.seed) #for reproducibility \n if (random.random() < self.epsilon): #exploration\n action = random.choice(actions) \n self.exploration = True\n else: #exploitation\n self.exploration = False\n if (self.q == {}):\n action = random.choice(actions)\n else:\n q = [self.getQ(state, a) for a in actions]\n maxQ = max(q)\n count = q.count(maxQ) \n if count > 1:\n ax = []\n for a in actions:\n if self.getQ(state, a) == maxQ:\n ax.append((state,a))\n action = random.choice(ax)[1]\n else:\n for a in actions:\n if self.getQ(state, a) == maxQ:\n action = a\n return action\n\n \n #QL\n def learnQL(self, state1, action1, reward, state2, actions):\n maxqnew = max([self.getQ(state2, a) for a in actions])\n self.update(state1, action1, reward, reward + self.gamma*maxqnew)\n\n def printQ(self):\n od = collections.OrderedDict(sorted(self.q.items()))\n for (key, val) in od.items():\n print(\"%r:%r\" % (key, val))\n\n\n\n\n\n\n\n\n","sub_path":"lens2.0/rl/QL.py","file_name":"QL.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"483561135","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@file : teacher_interface.py\n@author : Andy Zhang\n@time : 2020/10/15 15:22\n@Desc : \n\"\"\"\nfrom db import models\n\n\ndef teach_register(name, pwd):\n if models.Teacher.select(name):\n return False, '教师已存在'\n teach_obj = models.Teacher(name, pwd)\n teach_obj.save()\n return True, f'教师{name}注册成功'\n\n\ndef check_sourse_control(user):\n teach_obj = models.Teacher.select(user)\n if teach_obj.teacher_courses:\n return teach_obj.teacher_courses\n\n\ndef add_course_control(course, user):\n teach_obj = models.Teacher.select(user)\n if course in 
def check_sourse_control(user):\n    teach_obj = models.Teacher.select(user)\n    if teach_obj.teacher_courses:\n        return teach_obj.teacher_courses\n\n\ndef add_course_control(course, user):\n    teach_obj = models.Teacher.select(user)\n    if course in teach_obj.teacher_courses:\n        return False, 'Course already exists'\n    teach_obj.add_course(course)\n    return True, f'Course {course} added successfully'\n\n\ndef modify_score_control(teacher, stu, course, num):\n    teach_obj = models.Teacher.select(teacher)\n    teach_obj.modify_score(stu, course, num)\n    return f'Score for student {stu} in course {course} updated successfully; new score: {num}'\n","sub_path":"interface/teacher_interface.py","file_name":"teacher_interface.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"356634154","text":"# -*- coding: utf-8 -*-\nimport weakref\nimport re\nimport sqlalchemy as sa\nfrom sqlalchemy.orm import object_session\nfrom sqlalchemy.orm.util import has_identity\nfrom sqlalchemy.ext.declarative import declared_attr, DeclarativeMeta, base\nfrom alchemist.conf import settings\n\n\ndef _package_of(module):\n    \"\"\"Gets the root package name of the passed `models` package.\n    \"\"\"\n\n    # Get the registered package this model belongs to.\n    package = module.split('.')\n    while package:\n        # Is this name a registered package?\n        test = '.'.join(package)\n        if test in settings.get('PACKAGES', []):\n            # This is the package we are in.\n            return test\n\n        # Remove the right-most segment.\n        package.pop()\n\n    if not package:\n        # No package was found to be registered; attempt to guess the\n        # right package name; strip all occurrences of '.models' from the\n        # package name.\n        return module.replace('.models', '')\n\n\n#! Dictionary of metadata classes; keyed by package.\n_model_metadata = {}\n\n\n#! Dictionary of class registries; keyed by package.\n_model_registry = {}\n\n\nclass ModelBase(DeclarativeMeta):\n\n    @property\n    def _decl_class_registry(self):\n        return getattr(self, '_registry', None)\n\n    @property\n    def query(self):\n        \"\"\"Create an object session and return the query object.\"\"\"\n        return __import__('alchemist.db').db.session.query(self)\n\n    def __new__(cls, name, bases, attrs):\n        # Don't process further if this is the base.\n        for base in bases:\n            if isinstance(base, ModelBase):\n                # This is twice derived.\n                break\n\n        # Nope; this is the base.\n        else:\n            return super().__new__(cls, name, bases, attrs)\n\n        # Check for existing metadata.\n        package = _package_of(attrs['__module__'])\n        if package not in _model_registry:\n            _model_registry[package] = weakref.WeakValueDictionary()\n            _model_metadata[package] = sa.MetaData()\n\n        # Set new registry.\n        attrs['_registry'] = _model_registry[package]\n\n        # Add metadata and registry to the attributes.\n        attrs['metadata'] = _model_metadata[package]\n\n        # Continue processing.\n        return super().__new__(cls, name, bases, attrs)\n\n\nclass Model(metaclass=ModelBase):\n    \"\"\"Declares the base model.\n\n    This provides various helpers, defaults, and utilities for\n    sqlalchemy-derived models.\n    \"\"\"\n\n    __abstract__ = True\n\n    __init__ = base._declarative_constructor\n\n    @declared_attr\n    def __tablename__(cls):\n        \"\"\"\n        Underscorizes the class name combined with the package name in order\n        to form a normal name for a table in SQL.\n        \"\"\"\n        package = _package_of(cls.__module__).lower()\n        name = '%s.%s' % (package, cls.__name__.lower())\n        name = re.sub(r'([A-Z])', r'_\\1', name)\n        name = re.sub(r'\\.+', r'_', name)\n        return name\n
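\n    # Example (added, hypothetical): for a model class Page defined in a registered\n    # package 'blog', __tablename__ evaluates to 'blog_page'.\n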
\n    def save(self, session=None, commit=True):\n        \"\"\"\n        Save the changes to the model. If the model has not been persisted yet,\n        it is added to the session; the object session is then flushed and\n        optionally committed.\n\n        @param[in] session\n            A specific session to use instead of the thread-local, scoped\n            session.\n        \"\"\"\n        if has_identity(self):\n            # Object has already been persisted to the database; grab its\n            # session.\n            session = object_session(self)\n\n        else:\n            # Ensure we have a database session.\n            if session is None:\n                session = __import__('alchemist.db').db.session\n\n            # Object has not been persisted to the database.\n            session.add(self)\n\n        if commit:\n            # Commit the session as requested.\n            session.commit()\n\n        else:\n            # Just flush the session; do not commit.\n            session.flush()\n","sub_path":"src/alchemist/db/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"454330087","text":"import Connect\n\n\ndef Add_details(x):\n    conn = Connect.connection(x.databasename)\n    cursor = conn.cursor()\n\n    cursor.execute(\"insert into Patient_info(Name,Age,Disease,Contactno,Mailid,Roomno,Uniqueid ) values (?, ?, ?, ?, ?, \"\n                   \"?, ?)\", x.name,x.age,x.disease,x.contactno,x.mailid,x.roomno,\n                   x.uniqueid)\n    conn.commit()\n","sub_path":"Code to admit a patient/add_details.py","file_name":"add_details.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"518289259","text":"'''\nYour function should take in a single parameter (a string `word`)\nYour function should return a count of how many occurrences of ***\"th\"*** occur within `word`. Case matters.\nYour function must utilize recursion. It cannot contain any loops.\n'''\ndef count_th(word):\n\n    # 'th' cannot occur in a string shorter than 2 characters\n    if (len(word) < 2):\n        return 0\n\n    # count starts at zero because nothing has been found yet\n    count = 0\n\n    # if the first and second letters are 'th', increase the counter\n    if word[0] == 't' and word[1] == 'h':\n        count = 1\n\n    # slide the word over by one character and recurse\n    count = count + count_th(word[1:])\n\n    return count\n
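\n# Example (added): count_th('the theater thug') returns 3 -- 'th' occurs at indices 0, 4 and 12,\n# and the one-character slide means each candidate pair is checked exactly once.\n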
","sub_path":"recursive_count_th/count_th.py","file_name":"count_th.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"79102146","text":" \nimport time\nimport base64\nimport json\nfrom typing import Callable\nfrom sqlalchemy import exc\n\nfrom fastapi.exceptions import RequestValidationError\nfrom fastapi import HTTPException, Request, Response\nfrom fastapi.routing import APIRoute\n\nfrom app.configs.environment import Config\nfrom app.database.session import SessionLocal\n\nfrom app.exceptions.fast_api_validation import ValidationException\nfrom app.exceptions.fast_api_custom import CustomException\n\nfrom app.utils.jwt import jwt_decode\n\nclass AuthDbMiddleware(APIRoute):\n    def get_route_handler(self) -> Callable:\n        original_route_handler = super().get_route_handler()\n\n        async def custom_route_handler(request: Request) -> Response:\n\n            ## Token\n            # request_authorization = request.headers.get('Authorization', None)\n            # if not request_authorization:\n            #     raise CustomException(status_code=412, detail=\"Authorization headers are required\",\n            #                           type=\"headers\", code=995)\n\n            try:\n\n                ## Token decode\n                # token = jwt_decode(request_authorization)[1]\n                # token = json.loads(token)\n                # request.state.token = token\n                request.state.token = {\n                    'sub':'eaa58cb4-62b5-4fe6-88b6-99b4cf14f3a3'\n                }\n\n                ## Session DB\n                request.state.db = SessionLocal()\n\n                ## Time Response\n                before = time.time()\n                response: Response = await original_route_handler(request)\n                duration = time.time() - before\n                response.headers[\"X-Response-Time\"] = str(duration)\n\n                return response\n\n            except exc.SQLAlchemyError as err:\n                print(\"SQLAlchemyError: \", err)\n                if hasattr(request.state, \"db\"):\n                    request.state.db.close()\n\n                type_error = type(err).__name__\n                if err.__dict__.get('orig', None):\n                    detail_error = str(err.__dict__['orig'])\n\n                    if type_error == 'IntegrityError':\n                        if 'violates unique constraint' in detail_error:\n                            indice_c = detail_error.index('\"')\n                            subcadena = str(detail_error[indice_c:].replace('\"',''))\n                            subcadena = subcadena.split('_')\n                            raise HTTPException(status_code=409, detail='Problem while registering: {} already exists.'.format(subcadena[1]))\n\n                    elif type_error == 'OperationalError':\n                        raise HTTPException(status_code=504, detail='We are having connectivity problems')\n\n                raise HTTPException(status_code=400, detail='Something went wrong')\n\n            except RequestValidationError as err:\n                print(\"RequestValidationError: \", err.errors())\n                if hasattr(request.state, \"db\"):\n                    request.state.db.close()\n\n                # body = await request.body()\n                # detail = {\"errors\": err.errors(), \"body\": body.decode()}\n                raise ValidationException(manual=err.errors())\n\n            except CustomException as err:\n                if hasattr(request.state, \"db\"):\n                    request.state.db.close()\n\n                raise CustomException(status_code=err.status_code, \n                                      detail=err.detail,\n                                      type=err.type,\n                                      code=err.code)\n\n            except Exception as err:\n                if hasattr(request.state, \"db\"):\n                    request.state.db.close()\n\n                print(\"Exception: \", err)\n                raise HTTPException(status_code=500, detail='Something went wrong, please try again later')\n\n            finally:\n                if hasattr(request.state, \"db\"):\n                    request.state.db.close()\n\n        return custom_route_handler","sub_path":"app/middlewares/auth_db.py","file_name":"auth_db.py","file_ext":"py","file_size_in_byte":4285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"195811652","text":"# %load q02_data_splitter/build.py\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import TimeSeriesSplit\nfrom greyatomlib.time_series_day_02_project.q01_load_data.build import q01_load_data\n\ndef q02_data_splitter(path):\n    seed=9\n    shape,df = q01_load_data(path)\n    tssf= TimeSeriesSplit(n_splits=3)\n    trainl=()\n    validl=()\n    for train_index,valid_index in tssf.split(df):\n        trainl=trainl+ tuple(train_index)\n        validl=validl + tuple(valid_index)\n    return [[trainl,trainl],[validl,validl]]\n\n\n\n","sub_path":"q02_data_splitter/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"455487736","text":"import os\nimport tempfile\nimport shutil\nimport logging\n\nfrom abc import ABC, abstractmethod\nfrom mako.template import Template\n\nfrom .__k8s__ import have_api_resource, create_api_resource\n\nsettings = {}\n\n\nclass Pam(ABC):\n    def __init__(self):\n        self._dependents_ = []\n        self._settings_ = {}\n        self._name_ = 'None'\n\n    @property\n    def get_name(self):\n        return self._name_\n\n    @abstractmethod\n    def set_name(self, name):\n        self._name_ = name\n\n    def __render__(self, file_path, result_path, attributes):\n        if have_api_resource('monte', 'deploy'):\n            logging.warning('already installed, do nothing')\n            return\n        template = Template(filename=file_path)\n        rs = 
template.render(**attributes)\n if rs is None or len(rs) < 1:\n return\n\n print(rs)\n with open(result_path, \"w\") as file:\n file.write(rs)\n\n create_api_resource(result_path)\n\n def render(self, template_dir):\n tmp_dir = tempfile.mkdtemp()\n try:\n for file_name in os.listdir(template_dir):\n file_path = os.path.join(template_dir, file_name)\n result_path = os.path.join(tmp_dir, file_name)\n attributes = {}\n attributes.update(settings)\n attributes.update(self.get_settings())\n self.__render__(file_path, result_path, attributes)\n\n break\n finally:\n shutil.rmtree(tmp_dir)\n\n def get_settings(self):\n return self._settings_\n\n def update_settings(self, settings):\n self._settings_.update(settings)\n\n def replace_settings(self, settings):\n self._settings_.clear()\n self._settings_.update(settings)\n\n def get_dependents(self):\n return self._dependents_\n\n def update_dependents(self, dependents):\n self._dependents_.update(dependents)\n\n def replace_dependents(self, dependents):\n self._dependents_.clear()\n self._dependents_.update(dependents)\n","sub_path":"src/pam/__pam__.py","file_name":"__pam__.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"526660292","text":"import pickle\nimport os\nfrom save_file import SaveFile\n\nclass SaveManager:\n \"\"\" Handles the saving and loading of player data \"\"\"\n def __init__(self, game):\n self.game = game\n self.latest_save_folder = None\n self.latest_save = None\n\n def create_save(self, save_folder, save_name):\n \"\"\" Create a save file under given save folder with given name \"\"\"\n save_file = SaveFile(self.game)\n with open(\"saves/\" + save_folder + \"/\" + save_name, \"wb\") as file:\n pickle.dump([save_file], file, protocol=2)\n\n self.save_latest_save_info(save_folder, save_name)\n\n def load_save(self, save_folder, save_name):\n \"\"\" Load a given save file from a given follder \"\"\"\n with open(\"saves/\" + save_folder + \"/\" + save_name, \"rb\") as file:\n [save_file] = pickle.load(file)\n save_file.load_data(self.game)\n self.game.change_menu(\"location_menu\")\n self.game.in_game = True\n self.save_latest_save_info(save_folder, save_name)\n\n def load_latest_game(self):\n \"\"\" Load the latest save from the latest save folder \"\"\"\n self.load_save(self.latest_save_folder, self.latest_save)\n\n def get_saves(self, folder_name):\n \"\"\" Get all dat files within given save folder \"\"\"\n saves = []\n folder_path = \"saves/\" + folder_name\n if os.path.isdir(folder_path):\n directory = os.fsencode(folder_path)\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n if filename.endswith(\".dat\"):\n saves.append(filename)\n return saves\n\n def get_save_folders(self):\n \"\"\" Get all save folders in the saves directory \"\"\"\n save_folders = []\n for directory_name in next(os.walk(\"saves/\"))[1]:\n save_folders.append(directory_name)\n return save_folders\n\n def create_save_folder(self, folder_name):\n \"\"\" Create a unique save folder to store saves \"\"\"\n if os.path.isdir(\"saves/\" + folder_name):\n i = 1\n while True:\n test_folder = folder_name + \"(\" + str(i) + \")\"\n if not os.path.isdir(\"saves/\" + test_folder):\n folder_name = test_folder\n break\n i += 1\n os.mkdir(\"saves/\" + folder_name)\n self.create_save(folder_name, \"autosave.dat\")\n\n def save_latest_save_info(self, folder, save):\n \"\"\" Save the latest save folder and file \"\"\"\n self.latest_save_folder = folder\n 
self.latest_save = save\n with open(\"saves/latest_save.dat\", \"wb\") as file:\n pickle.dump([self.latest_save_folder, self.latest_save],\n file, protocol=2)\n\n def load_latest_save_info(self):\n \"\"\" Try to get the latest save folder and file \"\"\"\n self.latest_save_folder, self.latest_save = None, None\n save_folder, save_file = None, None\n if os.path.isfile(\"saves/latest_save.dat\"):\n with open(\"saves/latest_save.dat\", \"rb\") as file:\n [save_folder, save_file] = pickle.load(file)\n\n if os.path.isdir(\"saves/\" + str(save_folder)):\n self.latest_save_folder = save_folder\n if os.path.isfile(\"saves/\" + str(save_folder) + \"/\" + str(save_file)):\n self.latest_save = save_file\n","sub_path":"save_manager.py","file_name":"save_manager.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"297358558","text":"\"\"\"Test shopping list component.\"\"\"\nimport asyncio\n\nfrom homeassistant.bootstrap import async_setup_component\nfrom homeassistant.helpers import intent\n\n\n@asyncio.coroutine\ndef test_add_item(hass):\n \"\"\"Test adding an item intent.\"\"\"\n yield from async_setup_component(hass, 'shopping_list', {})\n\n response = yield from intent.async_handle(\n hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'beer'}}\n )\n\n assert response.speech['plain']['speech'] == \\\n \"I've added beer to your shopping list\"\n\n\n@asyncio.coroutine\ndef test_recent_items_intent(hass):\n \"\"\"Test recent items.\"\"\"\n yield from async_setup_component(hass, 'shopping_list', {})\n\n yield from intent.async_handle(\n hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'beer'}}\n )\n yield from intent.async_handle(\n hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'wine'}}\n )\n yield from intent.async_handle(\n hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'soda'}}\n )\n\n response = yield from intent.async_handle(\n hass, 'test', 'HassShoppingListLastItems'\n )\n\n assert response.speech['plain']['speech'] == \\\n \"These are the top 5 items in your shopping list: soda, wine, beer\"\n\n\n@asyncio.coroutine\ndef test_api(hass, test_client):\n \"\"\"Test the API.\"\"\"\n yield from async_setup_component(hass, 'shopping_list', {})\n\n yield from intent.async_handle(\n hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'beer'}}\n )\n yield from intent.async_handle(\n hass, 'test', 'HassShoppingListAddItem', {'item': {'value': 'wine'}}\n )\n\n client = yield from test_client(hass.http.app)\n resp = yield from client.get('/api/shopping_list')\n\n assert resp.status == 200\n data = yield from resp.json()\n assert data == ['beer', 'wine']\n","sub_path":"tests/components/test_shopping_list.py","file_name":"test_shopping_list.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"320742368","text":"class Student(object):\n @property #@property make the below function just like a property of the class\n def score(self):\n return self._score\n \n @score.setter #@func.setter make the function just like a property of the class which can be assigned a value.\n def score(self, value):\n if not isinstance(value, int):\n raise ValueError('score must be an integer!')\n if value < 0 or value > 100:\n raise ValueError('score must between 0 ~ 100!')\n self._score = value\n\n 
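# Usage sketch (added): s = Student(); s.score = 75 stores the value via the setter,\n    # while s.score = 101 or s.score = 'A' raises ValueError.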
\n","sub_path":"property_func.py","file_name":"property_func.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"207840050","text":"import setuptools\n\nwith open(\"readme.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"genomicinfo\",\n version=\"0.0.1\",\n author=\"Rishab Mallick\",\n author_email=\"rishabmallick6@gmail.com\",\n description=\"Extract genomic variants from scientific articles\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/rishabgit/genomic-info-from-papers\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n install_requires=[\n 'accelerate==0.3.0',\n 'argcomplete==2.0.0',\n 'argh==0.26.2',\n 'bcrypt==3.2.2',\n 'beautifulsoup4==4.11.1',\n 'bs4==0.0.1',\n 'certifi==2021.10.8',\n 'cffi==1.15.0',\n 'chardet==4.0.0',\n 'charset-normalizer==2.0.12',\n 'click==8.1.3',\n 'cryptography==37.0.2',\n 'datasets==1.11.0',\n 'dill==0.3.4',\n 'fabric==2.5.0',\n 'filelock==3.7.0',\n 'fsspec==2022.3.0',\n 'gensim==3.8.3',\n 'gffutils==0.10.1',\n 'huggingface-hub==0.0.12',\n 'idna==3.3',\n 'invoke==1.7.1',\n 'joblib==1.1.0',\n 'multiprocess==0.70.12.2',\n 'nervaluate==0.1.8',\n 'nltk==3.6.3',\n 'numpy==1.19.5',\n 'packaging==21.3',\n 'pandas==1.3.5',\n 'paramiko==2.10.4',\n 'pdfminer.six==20201018',\n 'psycopg2-binary==2.9.3',\n 'pyaml==21.10.1',\n 'pyarrow==8.0.0',\n 'pybedtools==0.9.0',\n 'pycparser==2.21',\n 'pyfaidx==0.6.4',\n 'PyNaCl==1.5.0',\n 'pyparsing==3.0.9',\n 'pysam==0.19.0',\n 'python-dateutil==2.8.2',\n 'pytz==2021.3',\n 'PyYAML==6.0',\n 'regex==2020.10.28',\n 'requests==2.27.1',\n 'sacremoses==0.0.53',\n 'scikit-learn==1.1.0',\n 'scipy==1.8.0',\n 'seqeval==1.2.2',\n 'simplejson==3.17.6',\n 'six==1.16.0',\n 'smart-open==6.0.0',\n 'sortedcontainers==2.4.0',\n 'soupsieve==2.3.2.post1',\n 'threadpoolctl==3.1.0',\n 'tokenizers==0.10.3',\n 'torch==1.9.0',\n 'tqdm==4.64.0',\n 'transformers==4.9.1',\n 'typing_extensions==4.2.0',\n 'urllib3==1.26.9',\n 'wbtools==1.3.0',\n 'xxhash==3.0.0'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"400238564","text":"import json\nimport keras\nfrom keras.models import model_from_json\nimport numpy as np\n\nimport constants\nfrom keras_model import KerasModel\nfrom utils import Utils\n\n\"\"\"\nfrom model_analysis import ModelAnalysis\nModelAnalysis.analyze('experiments/experiment_27.json', 'experiments/experiment_27.h5')\n\"\"\"\nclass ModelAnalysis():\n\n def analyze(json_path, weight_path):\n dev_images, dev_labels = KerasModel.load_images_and_labels(constants.FULL_SQUAT_DEV_FOLDER)\n image_names = Utils.get_image_names(constants.FULL_SQUAT_DEV_FOLDER)\n \n model = ModelAnalysis.load_model(json_path, weight_path)\n predictions = model.predict_on_batch(dev_images)\n prediction_labels = []\n for prediction in predictions:\n prediction_labels.append(np.argmax(prediction))\n \n for i in range(len(dev_labels)):\n if dev_labels[i] != prediction_labels[i]:\n print(\"{} label: {} predict: {}\".format(image_names[i], dev_labels[i], prediction_labels[i]))\n\n def load_model(json_path, weight_path):\n with open(json_path) as f:\n model_json = 
f.read()\n\n model = model_from_json(model_json)\n model.load_weights(weight_path)\n return model\n","sub_path":"model_analysis.py","file_name":"model_analysis.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"302071977","text":"import numpy as np\r\nfrom util import get_normalized_data, y2indicator, forward_prop, derivative_b1, derivative_w1, derivative_b2, derivative_w2, error_rate, cost, forward_relu, get_test_data\r\n\r\ndef batch_grad():\r\n\r\n #get data and for test and train sets\r\n X,Y = get_normalized_data()\r\n #XTrain = X[:-1000, :]\r\n #YTrain = Y[:-1000]\r\n #YTrain_ind = y2indicator(YTrain)\r\n #XTest = X[-1000:, :]\r\n #YTest = Y[-1000:]\r\n # = y2indicator(YTest)\r\n Y_ind = y2indicator(Y)\r\n\r\n batchSz = 500\r\n #Initialize random weights\r\n N, D = X.shape\r\n K = len(set(Y))\r\n M = 300\r\n W1 = np.random.randn(D, M)\r\n b1 = np.random.randn(M)\r\n W2 = np.random.randn(M, K)\r\n b2 = np.random.randn(K)\r\n\r\n learning_rate = 10e-5\r\n reg = 0.01\r\n\r\n no_batches = int(N/batchSz)\r\n print(\"No of bathces: \", no_batches)\r\n for i in range(300):\r\n for n in range(no_batches):\r\n #get current batch\r\n XBatch = X[n*batchSz:(n*batchSz + batchSz), :]\r\n YBatch_ind = Y_ind[n*batchSz:(n*batchSz + batchSz), :]\r\n #Forward prop\r\n pY, Z = forward_relu(XBatch, W1, b1, W2, b2)\r\n\r\n #Backprop\r\n W2 += learning_rate * (derivative_w2(pY, YBatch_ind, Z) + reg*W2)\r\n b2 += learning_rate * (derivative_b2(pY, YBatch_ind) + reg*b2)\r\n W1 += learning_rate * (derivative_w1(pY, YBatch_ind, W2, Z, XBatch) + reg*W1)\r\n b1 += learning_rate * (derivative_b1(pY, YBatch_ind, W2, Z) + reg*b1)\r\n\r\n if n%100 == 0:\r\n #Forward prop\r\n #pY, Z = forward_relu(XBatch, W1, b1, W2, b2)\r\n YBatch = Y[n*batchSz:n*batchSz + batchSz]\r\n P = np.argmax(pY, axis=1)\r\n er = error_rate(P, YBatch)\r\n\r\n c = cost(YBatch_ind, pY)\r\n print(\"Loop: \", i, n, \"Error rate: \", er, \"Cost: \", c )\r\n \r\n \r\n pY, Z = forward_relu(X, W1, b1, W2, b2)\r\n p = np.argmax(pY, axis=1)\r\n print(\"Final training error rate: \", error_rate(p, Y))\r\n\r\n XTest = get_test_data()\r\n pY, ZTest = forward_relu(XTest, W1, b1, W2, b2)\r\n YTest = np.argmax(pY, axis=1)\r\n\r\n f = open(\"test_result.csv\",\"w\")\r\n f.write(\"ImageId,Label\\n\")\r\n n = YTest.shape[0]\r\n for i in range(n):\r\n f.write(str(i+1) + \",\" + str(YTest[i]) + \"\\n\")\r\n f.close()\r\n\r\ndef main():\r\n batch_grad()\r\n #X = get_test_data()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n\r\n\r\n","sub_path":"BatchGD_with_regularization.py","file_name":"BatchGD_with_regularization.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"615710742","text":"'''Python file to cluster outbreaks and calculate CFR'''\nimport pandas as pd\nfrom datetime import date\nimport numpy as np\n\ndef time_delta(X):\n '''Function to calculate time difference between two dates'''\n d1 = date(2020, 3, 25)\n a = X.split(\"-\")\n year = int(a[0])\n month = int(a[1])\n day = int(a[2])\n d0 = date(year,month,day)\n delta = d1 - d0\n return delta\n\ndef main():\n outbreaks = pd.read_csv('../WHO_data_extraction/Outbreaks.csv')\n diseases = pd.read_csv('diseases.csv')\n incubation = dict(zip(list(diseases['Disease']), list(diseases['Maximum_Incubation'])))\n\n df = pd.merge(outbreaks, diseases, how='inner', on=['Disease'])\n df['days'] = 
df['Date'].apply(time_delta)\n df['outbreak_cluster'] = 0\n df.to_csv('cfr_df.csv')\n df = df.sort_values(['days'], ascending=[1])\n \n countries = list(df['Country'].unique())\n diseases_list = list(df['Disease'].unique())\n country_disease = {}\n count = 0\n\n df_cfr = pd.DataFrame(columns = ['Country', 'Date', 'Disease', 'ID', 'Month', 'Update', 'Year', 'Cases', 'Death', 'Pathogens', 'Disease_Scientific_Name', 'Vaccination',\n 'Lethality', 'Transmission_Medium', 'Minimum_Incubation', 'Maximum_Incubation', 'Host_for_Parasite', 'Source', 'Age_Group', 'Symptoms', 'days', 'outbreak_cluster', 'cfr'])\n for country in countries:\n for disease in diseases_list:\n name = str(country.strip())+'_'+str(disease.strip())\n df_country=df.copy()\n df_country = df_country[df_country['Country']==country]\n df_country = df_country[df_country['Disease']==disease]\n country_disease[name] = len(df_country)\n global Maximum_Incubation\n\n Maximum_Incubation = incubation[disease]\n \n global outbreak_count \n outbreak_count = 0\n\n def cluster_number(X):\n '''Function to assign cluster number to outbreak'''\n global outbreak_count\n global Maximum_Incubation\n day = X['delta_time']\n #print(Maximum_Incubation, outbreak_count)\n x = np.timedelta64(day, 'ns')\n days = x.astype('timedelta64[D]')\n #days / np.timedelta64(1, 'D')\n da = days.astype(int)\n if da > 3*Maximum_Incubation:\n outbreak_count += 1\n \n return outbreak_count\n \n def cfr_assign(X):\n '''Function to calculate CFR for the clustered outbreaks'''\n global cfr\n cluster = X['outbreak_cluster']\n cfr_calc = cfr[cluster]\n\n return(cfr_calc)\n\n if len(df_country) != 0 and len(df_country) > 1:\n print(count, name, len(df_country))\n df_country['days_lagged'] = df_country['days'].shift(1)\n df_country['days_lagged'].iloc[0]= df_country['days'].iloc[0]\n df_country['delta_time'] = df_country['days'] - df_country['days_lagged']\n df_country['outbreak_cluster'] = df_country.apply(cluster_number, 1) \n\n df_country1 = df_country.groupby(['outbreak_cluster']).agg({'Death':'max','Cases':'max'})\n df_country1['cfr'] = df_country1['Death']/df_country1['Cases']*100\n \n global cfr \n cfr = dict(zip(list(df_country1.index), list(df_country1['cfr'])))\n df_country['cfr'] = df_country.apply(cfr_assign,1)\n count += 1\n df_trial = df_country[['Country', 'Date', 'Disease', 'ID', 'Month', 'Update', 'Year', 'Cases', 'Death', 'Pathogens', 'Disease_Scientific_Name', 'Vaccination',\n 'Lethality', 'Transmission_Medium', 'Minimum_Incubation', 'Maximum_Incubation', 'Host_for_Parasite', 'Source', 'Age_Group', 'Symptoms', 'days', 'outbreak_cluster', 'cfr']]\n df_trial.to_csv('cfr_files/'+name+'.csv')\n df_cfr = df_cfr.append(df_trial)\n\n elif len(df_country) != 0:\n print(count, name, len(df_country))\n\n df_country['cfr'] = df_country['Death']/df_country['Cases']\n \n count += 1\n df_trial = df_country[['Country', 'Date', 'Disease', 'ID', 'Month', 'Update', 'Year', 'Cases', 'Death', 'Pathogens', 'Disease_Scientific_Name', 'Vaccination',\n 'Lethality', 'Transmission_Medium', 'Minimum_Incubation', 'Maximum_Incubation', 'Host_for_Parasite', 'Source', 'Age_Group', 'Symptoms', 'days', 'outbreak_cluster', 'cfr']]\n df_trial.to_csv('cfr_files_1/'+name+'.csv')\n df_cfr = df_cfr.append(df_trial)\n\n df_cfr.to_csv('df_cfr.csv')\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"CFR_Calculation/outbreaks_CFR_0.py","file_name":"outbreaks_CFR_0.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"372636077","text":"'''\r\nExam Fn - 1.py\r\nWrite a function that returns the nth even number.\r\nnthPar(1) //=> 0, the first even number is 0\r\nnthPar(3) //=> 4, the third even number is 4 (0, 2, 4)\r\nnthPar(100) //=> 198\r\nnthPar(1298734) //=> 2597466\r\n'''\r\n\r\ndef nthPar(n):\r\n    ene = (n - 1) * 2\r\n    print(\"The nth even number for\", n, \"is\", ene)\r\n    return ene\r\n\r\nprint(\"-------- Function that returns the nth even number. -------------\")\r\n\r\nn = int(input(\"Enter a number: \"))\r\nnthPar(n) ","sub_path":"Fn - 1.py","file_name":"Fn - 1.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"318130389","text":"import time\n\nimport random\n\nimport urllib.request\n\nURL_TEMPLATE = 'ftp://ftp.sec.gov/edgar/full-index/{}/QTR{}/master.idx'\nBASE_8K_URL = 'ftp://ftp.sec.gov/{}'\nEARLIEST = 1993\nLATEST = 2016\nQ1 = 1\nQ4 = 4\n\nclass Crawler:\n    def __init__(self):\n        self.master_indices = {}\n\n    def get_master_indices(self):\n        for i in range(1994, 1995 + 1):\n            for j in range(Q1, Q4 + 1):\n                if i == 2016 and j == 4:\n                    pass\n\n                else:\n                    path = URL_TEMPLATE.format(i, j)\n                    print('Opening new path: %s' % (path))\n\n                    url = urllib.request.urlopen(path)\n                    doc = url.read()\n\n                    self.master_indices[path] = doc\n\n                    wait = random.randint(10, 30)\n                    print('Waiting %s seconds' % (str(wait)))\n\n                    time.sleep(int(wait))\n\n        return self.master_indices\n\n    def get_8k_form(self, url):\n\n        print('\\nPulling document from URL %s' % (BASE_8K_URL.format(url)))\n\n        result = urllib.request.urlopen(BASE_8K_URL.format(url))\n        doc = result.read()\n\n        return doc\n","sub_path":"EdgarWebCrawler/crawler/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"340003114","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 24 01:00:56 2021\n\n@author: Q35joih4334\n\"\"\"\n\nimport re\nimport itertools\nimport spacy\nimport spacy.lang.en\nimport matplotlib\nimport ete3\n\ndef _perceived_luminance(r, g, b):\n    return .299*r + .587*g + .114*b\n\nclass codifier:\n\n    \"\"\"\n    Codifies input text to numbers\n    \"\"\"\n\n    def __init__(self):\n        self.codemapper = {}\n\n    def codify(self, text):\n        if text not in self.codemapper:\n            self.codemapper[text] = len(self.codemapper) + 1\n        return self.codemapper[text]\n\ndef segment_matching_sents(doc_texts,\n                           rootword_pattern,\n                           reverse=False,\n                           nlp=None,\n                           sent_splitting='rule'):\n\n    \"\"\"\n    Matches sentences in text that start or end with the rootword pattern: finds\n    sentences starting with the rootword pattern or sentences ending with the rootword\n    pattern.
Segments list of text sentences into list of lists.\n\n Input:\n\n doc_texts: text as a list of strings\n\n rootword_pattern: pattern to be input to spacy.matcher\n\n reverse (default: False): reverse root matching\n\n nlp (optional): spacy nlp object (default: loads spacy en_core_web_sm)\n\n sent_splitting (dep, stat, rule [DEFAULT]): sentence splitting strategy\n\n Output:\n\n list of lists containing sentence tokens as strings\n \"\"\"\n\n if not isinstance(doc_texts, list):\n raise TypeError('doc_texts argument must be list')\n\n if not nlp:\n\n # Dependency parse splitting\n if sent_splitting == 'dep':\n nlp = spacy.load('en_core_web_sm',\n disable=['ner', 'tagger', 'textcat', 'lemmatizer'])\n\n # Statistical sentence segmenter\n elif sent_splitting == 'stat':\n nlp = spacy.load('en_core_web_sm', exclude=[\"parser\"],\n disable=['ner', 'tagger', 'textcat', 'lemmatizer'])\n nlp.enable_pipe('senter')\n\n # Rule-based pipeline component\n elif sent_splitting == 'rule':\n nlp = spacy.lang.en.English()\n nlp.add_pipe('sentencizer')\n\n Codifier = codifier()\n\n matcher = spacy.matcher.Matcher(nlp.vocab)\n matcher.add('rootword', [rootword_pattern])\n\n doc_sents = []\n\n for doc_text_i, text in enumerate(doc_texts):\n\n sent_tokens = []\n\n # NOTE: this might be unsafe if text length is really long\n # TODO: maybe there could be a safer way to do this?\n nlp.max_length = len(text)\n\n doc = nlp(text)\n\n matches = matcher(doc)\n\n if matches:\n\n for match_id, start, end in matches:\n\n span = doc[start:end] #TODO: rename this to root_span\n sent = span.sent\n\n # All tokens after root word\n after_tokens = span.doc[span[-1].i + 1:span.sent[-1].i + 1]\n\n tokens = []\n\n # Rootword match in sentence start\n if span[0].i == span.sent[0].i and not reverse:\n\n for token in sent:\n\n tokens.append({\n '_id': Codifier.codify(token.text.lower()),\n '_token_text': token.text,\n '_whitespace': token.whitespace_,\n '_token_is_punct': token.is_punct})\n\n sent_tokens.append(tokens)\n\n # Rootword match in sentence end\n # If all tokens after root word are non-words (apart from puncts)\n # TODO: or whitespace?\n # TODO: will this skip the sentence if after_tokens is empty?\n elif all([x.is_punct for x in after_tokens]) and reverse:\n\n for token in reversed(sent):\n\n trailing_punct = token.i in [x.i for x in after_tokens]\n\n if not trailing_punct:\n\n tokens.append({\n '_id': Codifier.codify(token.text.lower()),\n '_token_text': token.text,\n '_whitespace': token.whitespace_,\n '_token_is_punct': token.is_punct})\n\n sent_tokens.append(tokens)\n\n # NOTE: All docs are included, regardless of whether matching\n # sentences are found or not. The reason for this is ensuring that\n # doc_sents, doc_refs and doc_attrs are always of equal length.\n # TODO: this is a bit ugly solution, maybe doc_sents, doc_refs and\n # doc_attrs should be concatenated into list of dicts first?\n # Current solution also leaves empty lists in doc_sents\n doc_sents.append(sent_tokens)\n\n return doc_sents\n\n\n# TODO: add a test that the created tree is actually a tree!\n# ete3 gets stuck if trying to enter a non-tree?\n# ete3 doesn't have a test for tree?\ndef tree_from_list(doc_sents,\n doc_refs=None,\n doc_attrs=None):\n\n \"\"\"\n Returns ete3 Tree from a list of lists containing sentences.\n Whitespace is expected to be a separate item in list.\n\n Each token and whitespace represents one node. Tree structure is based on\n lowercase tokens (_node_name). 
Original text is retained as node attribute\n in each node (_label). A simple version of node name with special\n characters removed is created (_simple_label), which can be useful for\n sorting the tree.\n\n Input:\n\n doc_sents (list of lists): sentences (required)\n\n doc_refs (list of strings): reference for each sentence (optional)\n\n doc_attrs (list of dicts): node attributes to be added to ete3 Tree nodes\n\n Output:\n\n ete3 Tree\n \"\"\"\n\n if not doc_refs:\n doc_refs = [None for x in range(len(doc_sents))]\n\n if not doc_attrs:\n doc_attrs = [{} for x in range(len(doc_sents))]\n\n parent_child_table = []\n node_features = {}\n\n for doc_sent, doc_ref, doc_attr in zip(doc_sents, doc_refs, doc_attrs):\n\n for sent_tokens_i, sent_tokens in enumerate(doc_sent):\n\n # Create (cumulative) sentence structure\n # Also create node attribute data\n sent_node_names = []\n for sent_token_i, sent_token in enumerate(sent_tokens):\n\n cumulative_tokens = sent_tokens[0:sent_token_i + 1]\n\n node_name = '_'.join([str(x['_id']) for x in cumulative_tokens])\n\n simple_label = ''.join([x['_token_text'].lower() for x in cumulative_tokens])\n simple_label = ''.join(filter(str.isalnum, simple_label))\n\n attrs = doc_attr.copy() #collect optional attributes\n attrs.update(sent_token) #add _token_text and _whitespace\n attrs.update({\n '_node_name': node_name,\n '_simple_label': simple_label,\n '_ref': doc_ref})\n\n sent_node_names.append(node_name)\n node_features[node_name] = attrs\n\n # Create parent_child_table for ete3 Tree\n for i in range(len(sent_node_names) - 1):\n\n parent = sent_node_names[i]\n child = sent_node_names[i + 1]\n\n if parent != child:\n parent_child_table.append((parent, child, 0))\n\n # Create ete3 Tree and add node attributes\n tree = ete3.Tree.from_parent_child_table(set(parent_child_table))\n\n for node in tree.traverse():\n node.add_features(**node_features[node.name])\n\n # Clean up nodes\n for node in tree.traverse():\n\n # Remove punct nodes\n # TODO: maybe this should be optional\n #if len(node.get_children()) > 1: #junction\n if len(node.get_leaves()) > 1:\n if node._token_is_punct or node._token_text.strip() == '':\n node.delete()\n\n return tree\n\n# TODO: this could be cleaned up\n# TODO: consider moving layout outside from here\ndef default_treestyle(tree,\n reverse=False,\n highlights=None,\n cmap=matplotlib.cm.get_cmap('tab20'),\n sort_by_name=True,\n sort_by_topology=True):\n\n if not highlights:\n highlights = []\n\n cmap_colors = itertools.cycle([cmap(x) for x in range(cmap.N)])\n highlight_colors = {x.lower():next(cmap_colors) for x in highlights}\n\n # Hide nodes from tree visualization\n for node in tree.traverse():\n node.img_style['fgcolor'] = '#FFFFFFFF'\n node.img_style['bgcolor'] = '#FFFFFFFF'\n node.img_style[\"hz_line_color\"] = '#FFFFFFFF'\n node.img_style['size'] = 0\n\n # TODO: these params should be accessible from default_treestyle\n def text_tree_default_layout(node,\n node_margin=.5,\n space_margin_mult=.4,\n branch_margin=10,\n fontsize_min=8,\n fontsize_max=96):\n\n name_face = ete3.TextFace(node._token_text)\n\n # Handle font size change depending on leave count under the node\n # Limit fontsize\n leaf_count = len(node.get_leaves())\n font_size = sorted([fontsize_min, leaf_count * fontsize_min, fontsize_max])[1]\n name_face.fsize = font_size\n\n # Node margin is added to avoid nodes overlapping\n name_face.margin_left = node_margin\n name_face.margin_right = node_margin\n\n # Add extra margin if node has whitespace\n if reverse and 
node._whitespace == ' ':\n name_face.margin_left = space_margin_mult * font_size\n\n elif not reverse and node._whitespace == ' ':\n name_face.margin_right = space_margin_mult * font_size\n\n # Add Additional margin in case node is at tree branch\n if len(node.get_sisters()) > 0:\n name_face.margin_left = branch_margin\n\n if len(node.get_children()) > 1: #junction\n name_face.margin_right = branch_margin\n\n # Handle highlighting\n bgcolor = (1, 1, 1, 1)\n for pattern, color in highlight_colors.items():\n if re.search(pattern, node._token_text, re.IGNORECASE):\n bgcolor = color\n break\n\n # Switch text color between white and black depending on background color\n if _perceived_luminance(*bgcolor[:3]) > .5:\n text_color = (0, 0, 0)\n else:\n text_color = (1, 1, 1)\n name_face.inner_background.color = matplotlib.colors.to_hex(bgcolor[:3])\n name_face.fgcolor = matplotlib.colors.to_hex(text_color)\n\n # Add node text\n ete3.faces.add_face_to_node(name_face, node, column=0)\n\n # Add reference\n if node.is_leaf():\n if node._ref:\n ref_face = ete3.TextFace(node._ref, fsize=6)\n ref_face.margin_right = 10\n ref_face.margin_left = 10\n ete3.faces.add_face_to_node(ref_face, node, column=1, aligned=False)\n\n ts = ete3.TreeStyle()\n\n ts.show_leaf_name = False\n ts.layout_fn = text_tree_default_layout\n ts.root_opening_factor = 1\n ts.scale = 20\n ts.show_scale = False\n\n ts.margin_top = 20\n ts.margin_bottom = 20\n ts.margin_right = 20\n\n # ts.draw_guiding_lines = True\n # ts.guiding_lines_color = \"#cccccc\"\n\n if reverse:\n ts.orientation = 1\n else:\n ts.orientation = 0\n\n # Sort\n # TODO: check if _simple_label exists in all nodes\n # NOTE: such check is actually not necessary if tree_from_list has been used\n if sort_by_name:\n tree.sort_descendants('_simple_label')\n\n if sort_by_topology:\n tree.ladderize()\n\n return tree, ts\n\ndef draw_tree(doc_texts,\n rootword_pattern,\n output_file,\n reverse=False,\n doc_refs=None,\n doc_attrs=None,\n highlights=None,\n nlp=None,\n sent_splitting='rule'):\n\n \"\"\"\n Build text tree and save it as a file.\n\n This is a convenience function that runs all other function in a simple\n manner.\n\n Input:\n\n texts: lists of texts (strings) to be presented as a tree\n\n rootword_pattern: pattern to be input to spacy.matcher\n\n output_file: path for the tree to be drawn\n\n reverse (default: False): reverse segmentation (for sentences ending\n with word of interest)\n\n doc_refs (optional): list of strings containing references to be displayed next to sentence\n\n highlights (optional): list of regexp patterns to use for highlighting nodes\n\n Output:\n\n tree as ete3 object\n\n segmented texts\n\n \"\"\"\n\n if not doc_refs:\n doc_refs = [None for x in range(len(doc_texts))]\n\n if not doc_attrs:\n doc_attrs = [{} for x in range(len(doc_texts))]\n\n # Extract matching sentences\n print('Extracting matching sentences.', end=' ')\n doc_sents = segment_matching_sents(doc_texts,\n rootword_pattern,\n reverse=reverse,\n nlp=nlp,\n sent_splitting=sent_splitting)\n\n # Check for how many sentences were found\n total_matching_sentences = sum([len(x) for x in doc_sents])\n print('Found {} sentences.'.format(total_matching_sentences))\n\n if total_matching_sentences == 0:\n print('No matching sentences found, skipping.')\n return None, None\n\n # Make tree\n print('Building tree.')\n tree = tree_from_list(doc_sents, doc_refs=doc_refs, doc_attrs=doc_attrs)\n\n # Apply style\n print('Applying style.')\n tree, ts = default_treestyle(tree, reverse=reverse, 
highlights=highlights)\n\n    # Render to file\n    print('Rendering tree to {}'.format(output_file))\n    tree.render(output_file, tree_style=ts)\n\n    return tree, doc_sents\n","sub_path":"text_tree/text_tree.py","file_name":"text_tree.py","file_ext":"py","file_size_in_byte":13946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"293625451","text":"from django.apps import apps\nfrom django.core.management.base import BaseCommand\n\n\nclass BaseAddressBaseCommand(BaseCommand):\n    \"\"\"\n    Turn off auto system check for all apps\n    We will manually run system checks only for the\n    'addressbase' and 'pollingstations' apps\n    \"\"\"\n    requires_system_checks = False\n\n    def perform_checks(self):\n        \"\"\"\n        Manually run system checks for the\n        'addressbase' and 'pollingstations' apps\n        Management commands can ignore checks that only apply to\n        the apps supporting the website part of the project\n        \"\"\"\n        self.check([\n            apps.get_app_config('addressbase'),\n            apps.get_app_config('pollingstations')\n        ])\n","sub_path":"polling_stations/apps/addressbase/management/base_command.py","file_name":"base_command.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"3762348","text":"# encoding=utf-8\n\n\"\"\"\n\n\n\"\"\"\nimport calendar\nimport pandas as pd\nimport talib\nimport numpy as np\nimport tushare as ts\nimport math\nimport matplotlib\nmatplotlib.use('agg')\nfrom io import BytesIO\n\nfrom CornerDetectAndAutoEmail.Sub import genStkPicForQQ, genStkIdxPicForQQ\nfrom RelativeRank.Sub import get_k_data_JQ, my_pro_bar\nfrom SendMsgByQQ.QQGUI import send_qq\nfrom SendMsgByQQ.SendPicByQQ import send_pic_qq, send_pic_qq_data\nfrom PIL import Image\n\nfrom pylab import *\nfrom SDK.MyTimeOPT import get_current_date_str, add_date_str\n\n\ndef week_macd_stray_judge(stk_code, win_qq_name):\n\n    # Get today's move; stocks that gained less than 3% are not considered\n    df_now = get_k_data_JQ(stk_code, count=2, end_date=get_current_date_str()).reset_index()\n\n    if (df_now.tail(1)['close'].values[0]-df_now.head(1)['close'].values[0])/df_now.head(1)['close'].values[0] < 0.03:\n        print('week_macd_stray_judge: ' + stk_code + ' has not gained enough!')\n        return False\n\n    df = get_k_data_JQ(stk_code, count=400, end_date=get_current_date_str()).reset_index()\n\n    if len(df) < 350:\n        print('week_macd_stray_judge: ' + stk_code + ' does not have enough data!')\n        return False\n\n    # Trim to a whole number of 20-day blocks\n    df_floor = df.tail(math.floor(len(df)/20)*20-19)\n\n    # Add the weekday of each row\n    df_floor['day'] = df_floor.apply(\n        lambda x: calendar.weekday(int(x['date'].split('-')[0]), int(x['date'].split('-')[1]),\n                                   int(x['date'].split('-')[2])), axis=1)\n\n    # Rolling mean (unused)\n    # df_floor['close_m5'] = df_floor['close'].rolling(window=5).mean()\n\n    # Keep one bar per week: Friday rows, plus the latest row if it is not a Friday\n    if df_floor.tail(1)['day'].values[0] != 4:\n        df_floor_slice_5 = pd.concat([df_floor[df_floor.day == 4], df_floor.tail(1)], axis=0)\n    else:\n        df_floor_slice_5 = df_floor[df_floor.day == 4]\n
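\n    # Note (added): calendar.weekday() maps Monday..Sunday to 0..6, so day == 4 selects\n    # Fridays and the frame above approximates weekly (Friday-close) bars.\n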
\n    # Compute MACD on the weekly bars\n    df_floor_slice_5['MACD'], df_floor_slice_5['MACDsignal'], df_floor_slice_5['MACDhist'] = talib.MACD(df_floor_slice_5.close,\n                                                                                                       fastperiod=6, slowperiod=12,\n                                                                                                       signalperiod=9)\n\n    # Keep one bar per 20 rows (a monthly frame)\n    df_floor_slice_20 = df_floor.loc[::20, :]\n\n    # Compute MACD on the monthly bars\n    df_floor_slice_20['MACD'], df_floor_slice_20['MACDsignal'], df_floor_slice_20['MACDhist'] = talib.MACD(\n        df_floor_slice_20.close,\n        fastperiod=4,\n        slowperiod=8,\n        signalperiod=9)\n\n    # Last date in the weekly frame\n    date_last = df_floor_slice_5.tail(1)['date'].values[0]\n\n    # Check for divergence\n    macd_5 = df_floor_slice_5.tail(3)['MACD'].values\n    macd_20 = df_floor_slice_20.tail(4)['MACD'].values\n    if (macd_5[1] == np.min(macd_5)) & (macd_20[1] != np.max(macd_20)) & (macd_20[2] != np.max(macd_20)):\n\n        df_floor_slice_5.plot('date', ['close', 'MACD'], subplots=True, style=['--', '*'])\n        plt.title(stk_code + 'week-stray'+date_last)\n\n        output = BytesIO()  # BytesIO reads and writes bytes in memory\n        buf_save = BytesIO()\n\n        plt.savefig(output)\n        output.seek(0)\n        img = Image.open(output)  # Image.open can open both remote and local images\n\n        img.convert(\"RGB\").save(buf_save, \"BMP\")  # save the image in RGB mode\n        data = buf_save.getvalue()[14:]\n        buf_save.close()\n        output.close()\n        plt.close()\n\n        send_pic_qq_data(win_qq_name, data)\n\n        # Plot the monthly chart\n        df_floor_slice_20.plot('date', ['close', 'MACD'], subplots=True, style=['--', '*'])\n        plt.title(stk_code + 'month-stray' + date_last)\n\n        output = BytesIO()  # BytesIO reads and writes bytes in memory\n        buf_save = BytesIO()\n\n        plt.savefig(output)\n        output.seek(0)\n        img = Image.open(output)  # Image.open can open both remote and local images\n\n        img.convert(\"RGB\").save(buf_save, \"BMP\")  # save the image in RGB mode\n        data = buf_save.getvalue()[14:]\n        buf_save.close()\n        output.close()\n        plt.close()\n\n        send_pic_qq_data(win_qq_name, data)\n\n        return True\n    else:\n        return False\n\n\ndef checkWeekStrayForAll():\n\n    win_qq_name = '影子'\n\n    df_total = ts.get_stock_basics()\n    for stk in df_total.index:\n        if int(str(df_total.loc[stk, 'timeToMarket'])[:4]) >= int(get_current_date_str()[:4]) - 4:\n            print('checkWeekStrayForAll: ' + stk + ' has been listed for less than 4 years!')\n            continue\n\n        try:\n            stray_flag = week_macd_stray_judge(stk, win_qq_name)\n        except Exception:\n            print('checkWeekStrayForAll: ' + stk + ' divergence check raised an error!')\n            stray_flag = False\n\n        if stray_flag:\n            df = get_k_data_JQ(stk, count=400, end_date=get_current_date_str())\n            fig, _ = genStkPicForQQ(df)\n\n            plt.title(str(stk))\n            send_pic_qq(win_qq_name, fig)\n            plt.close()\n\n            fig, _ = genStkIdxPicForQQ(df)\n\n            plt.title(str(stk))\n            send_pic_qq(win_qq_name, fig)\n            plt.close()\n        else:\n            print('No week-stray in ' + stk)\n            # send_qq(win_qq_name, 'No week-stray in ' + stk)\n
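\n\n# Note (added): week_macd_stray_judge() above is a rough bullish-divergence heuristic --\n# the middle of the last three weekly MACD values is a local low while the recent monthly\n# MACD values are not highs. It is a screening hint, not a confirmed trading signal.\n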
\n\nif __name__ == '__main__':\n\n\n    r = calendar.weekday(2019, 12, 5)\n    from JQData_Test.auth_info import *\n\n    # week_macd_stray_judge('300183', '影子')\n\n    checkWeekStrayForAll()\n\n    stk_code = '600707'\n    df = get_k_data_JQ(stk_code, count=600, end_date=get_current_date_str()).reset_index()\n\n    # Trim to a whole number of 20-day blocks\n    df_floor = df.tail(math.floor(len(df)/20)*20-19)\n\n    # Rolling five-day mean of the close\n    df_floor['close_m5'] = df_floor['close'].rolling(window=5).mean()\n\n    # Add the weekday of each row\n    df_floor['day'] = df_floor.apply(lambda x: calendar.weekday(int(x['date'].split('-')[0]), int(x['date'].split('-')[1]), int(x['date'].split('-')[2])), axis=1)\n\n    # Keep one bar per week\n    # df_floor_slice_5 = df_floor.loc[::5, :]\n    if df_floor.tail(1)['day'].values[0] != 4:\n        df_floor_slice_5 = pd.concat([df_floor[df_floor.day==4], df_floor.tail(1)], axis=0)\n    else:\n        df_floor_slice_5 = df_floor[df_floor.day==4]\n\n    # Compute the indicator\n    df_floor_slice_5['MACD'], df_floor_slice_5['MACDsignal'], df_floor_slice_5['MACDhist'] = talib.MACDEXT(df_floor_slice_5.close,\n                                                                                                           fastperiod=6, slowperiod=12,\n                                                                                                           signalperiod=9)\n\n    df_floor_slice_5['MACD'] = df_floor_slice_5.apply(lambda x: 2*x['MACD'], axis=1)\n\n    df_floor_slice_5['close_M26'] = df_floor_slice_5['close'].rolling(window=26).mean()\n    df_floor_slice_5['close_M12'] = df_floor_slice_5['close'].rolling(window=12).mean()\n\n    df_floor_slice_5['diff'] = df_floor_slice_5.apply(lambda x: x['close_M12'] - x['close_M26'], axis=1)\n    df_floor_slice_5['diff_M9'] = df_floor_slice_5['diff'].rolling(window=9).mean()\n\n    df_floor_slice_5['myMACD'] = df_floor_slice_5.apply(lambda x: 2*(x['diff'] - x['diff_M9']), axis=1)\n\n\n    checkWeekStrayForAll()\n\n\n    \"\"\"\n    Plotting (for exploration):\n    df_floor_slice_5.plot('date', ['close', 'MACD', 'myMACD'], subplots=True, style=['--*', '*', '*'])\n    \"\"\"\n\n    end = 0","sub_path":"MACD_Stray_Analysis/Demo1.py","file_name":"Demo1.py","file_ext":"py","file_size_in_byte":7340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"529438507","text":"import rcpy\nimport rcpy.servo as servo\nimport rcpy.clock as clock\nimport time\nimport rcpy.mpu9250 as mpu9250\nimport numpy as np\n\nFORE = 0\nSTARBOARD = 1\nAFT = 2\nPORT = 3\n\nX_AXIS = 0\nY_AXIS = 1\nZ_AXIS = 2\n\nKP_XY = 0.14\nKI_XY = 0.00\nKD_XY = 0.07\n\nTARGET_THROTTLE = 0.40\nTARGET_MIN_THROTTLE = 0.15\n\nMAX_THROTTLE = 0.99\nTHROTTLE_OFF = 0.00\n\nT_RAMP_UP = 2.0\nT_RAMP_DOWN = 4.0\n\nclass PidController:\n    def __init__(self, kp, ki, kd):\n        self.kp = kp\n        self.ki = ki\n        self.kd = kd\n        self.t = None\n        self.e = None\n        self.i = None\n\n    def initialize(self, initial_time, initial_error):\n        self.t = initial_time\n        self.e = initial_error\n        self.i = 0.0\n\n    def update(self, time, error):\n        dt = time - self.t\n        p = error\n        self.i += error * dt\n        d = (error - self.e) / dt\n        pid = self.kp * p + self.ki * self.i + self.kd * d\n        self.e = error\n        self.t = time\n        return pid\n\nclass Throttles:\n\n    PULSE_FREQUENCY = 0.02\n\n    def __init__(self, maximum, minimum):\n        self.maximum = maximum\n        self.minimum = minimum\n        self.throttles = []\n        self.escs = [servo.ESC(FORE+1), servo.ESC(STARBOARD+1), servo.ESC(AFT+1), servo.ESC(PORT+1)] # header is numbered 1..8\n        self.clks = []\n        for esc in self.escs:\n            self.clks.append(clock.Clock(esc, Throttles.PULSE_FREQUENCY))\n        self.armed = False\n\n    def govern(self):\n        self.throttles = np.minimum(self.maximum, self.throttles)\n        self.throttles = np.maximum(self.minimum, self.throttles)\n\n    def set(self, values):\n        self.throttles = values\n        self.govern()\n\n    def add(self, values):\n        self.throttles = np.add(self.throttles, values)\n        self.govern()\n\n    def update(self):\n        for esc, throttle in zip(self.escs, self.throttles):\n            esc.set(throttle)\n\n    def arm(self):\n        for esc in self.escs:\n            esc.pulse(-0.1)\n            esc.set(-0.1)\n        self.armed = True\n\n    def start(self):\n        if not self.armed:\n            raise ValueError('cannot start throttles; ESCs are not armed')\n        for clk in self.clks:\n            clk.start()\n\n    def stop(self):\n        self.set([THROTTLE_OFF for _ in range(len(self.escs))])\n        for clk in self.clks:\n            clk.stop()\n\n    def __str__(self):\n        return str(self.throttles)\n\ndef countdown(n):\n    for i in range(n):\n        print(n-i)\n        time.sleep(1.0)\n\ndef throttle(time):\n    if time < T_RAMP_UP:\n        return TARGET_MIN_THROTTLE + TARGET_THROTTLE\n    elif time < T_RAMP_UP + T_RAMP_DOWN:\n        return TARGET_MIN_THROTTLE + TARGET_THROTTLE * (T_RAMP_DOWN - (time - T_RAMP_UP))/T_RAMP_DOWN\n    else:\n        return THROTTLE_OFF\n\ntry:\n    rcpy.set_state(rcpy.RUNNING)\n    mpu9250.initialize(enable_dmp = True, dmp_sample_rate = 100, enable_fusion = True)\n\n    throttles = Throttles(MAX_THROTTLE, THROTTLE_OFF)\n    throttles.arm()\n\n    pid_cx = PidController(KP_XY,KI_XY,KD_XY)\n    pid_cy = PidController(KP_XY,KI_XY,KD_XY)\n\n    countdown(5)\n\n    throttles.start()\n    data = mpu9250.read()\n\n    t0 = time.time()\n    tb0 = data['tb']\n\n    pid_cx.initialize(t0, tb0[X_AXIS])\n    pid_cy.initialize(t0, tb0[Y_AXIS])\n\n    while time.time() 
< t0 + T_RAMP_UP + T_RAMP_DOWN + 1.0:\n tb = mpu9250.read()['tb']\n t1 = time.time()\n pid_x = pid_cx.update(t1, tb[X_AXIS])\n pid_y = pid_cy.update(t1, tb[Y_AXIS])\n \n base_throttle = throttle(t1 - t0)\n #print(base_throttle)\n \n throttles.set([base_throttle for _ in range(4)])\n throttles.add((pid_x,-pid_y,-pid_x,pid_y))\n throttles.update()\n\nfinally:\n throttles.stop()\n servo.disable()","sub_path":"hop-test.py","file_name":"hop-test.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"348782656","text":"#coding:utf-8\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\"\"\"\n Given a non-empty special binary tree consisting of nodes with the non-negative value, where each node in this tree has exactly two or zero sub-node. If the node has two sub-nodes, then this node's value is the smaller value among its two sub-nodes.\n\n Given such a binary tree, you need to output the second minimum value in the set made of all the nodes' value in the whole tree.\n\n If no such second minimum value exists, output -1 instead.\n\n 关键是理解题意,对于这种二叉树,显然,root节点就是最小值节点,要寻找第二小节点,遍历节点即可。tips:只遍历子节点中最小的节点\n\"\"\"\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n def findSecondMinimumValue(self, root):\n if not root or not root.left or (root.left.val == root.val and root.right.val == root.val):\n return -1\n self.value = root.val\n if root.left.val == self.value:\n res = root.right.val\n else:\n res = root.left.val\n def search(root):\n if not root:\n return None\n if root.val != self.value and root.val < self.value:\n self.res = root.val\n search(root.left)\n search(root.right)\n search(root)\n return self.res\n \n","sub_path":"leetcode/python/671.Second_Minimum_Node.py","file_name":"671.Second_Minimum_Node.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"238721226","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n Date: 2019/05/21\n Author: Guo Zijian\n Abstract: find my mates control node\n\"\"\"\nimport roslib\nimport rospy\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Int8\nimport os\nimport sys\nimport time\nimport wave\nimport datetime\nimport pyaudio\n#from kamerider_speech.msg import mission\nfrom sound_play.libsoundplay import SoundClient\nfrom read_xml_files import main as read_main\nfrom play_signal_sound import play_signal_sound\nfrom turtlebot_msgs.srv import SetFollowState\nfrom geometry_msgs.msg import Pose\nfrom kamerider_image_msgs.msg import mission\nfrom nav_msgs.msg import Odometry\n\nclass find_my_mates(object):\n\n def __init__(self):\n rospy.on_shutdown(self.cleanup)\n self.voice = rospy.get_param(\"~voice\", \"voice_kal_diphone\")\n self.question_start_signal = rospy.get_param(\"~question_start_signal\", \"~/catkin_ws/src/kamerider_speech/sounds/question_start_signal.wav\")\n\n self.pud_destination_topic_name = None\n self.pub_take_photo_topic_name = None\n self.pub_person_pos_topic_name = None\n self.pub_jack_topic_name = None\n self.sub_xfei_back_topic_name = None\n self.sub_is_reach_topic_name = None\n self.sub_detect_result_topic_name = None\n self.sub_person_pos_topic_name = None\n \n #self.pub_get_pose_signal_topic_name = None\n\n # Variables\n self.destination = None\n self.is_reach = False\n self.is_pub_signal = False\n self.is_pub_destination = False\n self.is_pub_person_pos = False\n self.is_report = False\n self.person_name = False\n self.count = 0\n self.person_num = 0\n self.person_pos = []\n self.description = []\n self.names = ['', '', '']\n self.guest_names = []\n\n # Initialize sound client\n self.sh = SoundClient(blocking=True)\n rospy.sleep(1)\n self.sh.stopAll()\n rospy.sleep(1)\n\n self.get_params()\n self.start_fmm()\n\n print(\"START\")\n while not rospy.is_shutdown():\n if self.is_pub_destination:\n msg = String()\n msg.data = self.destination + str(self.count)\n self.pub_destination.publish(msg)\n self.is_pub_destination = False\n if self.is_reach:\n if self.is_report:\n self.sh.say(\"I have found the guests\", self.voice)\n self.sh.say(\"here are the results\", self.voice)\n for line in self.description:\n self.sh.say(line, self.voice)\n self.is_report = False\n else:\n rospy.sleep(1.5)\n if not self.is_pub_signal:\n msg = String()\n msg.data = 'take_photo'\n self.pub_take_photo.publish(self.pub_take_photo_topic_name)\n self.is_pub_signal = True\n if self.person_num0:\n say_name = ''\n if \"living\" in output:\n for name in self.guest_names:\n say_name = say_name + name + ''\n self.destination = 'livingroom'\n self.sh.say(\"I will go to the living room to find \" + say_name, self.voice)\n self.sh.say(\"Please wait for a moment\", self.voice)\n self.is_pub_destination = True\n elif \"bedroom\" in output or 'bathroom' in output:\n self.destination = 'bedroom'\n self.sh.say(\"I will go to the bedroom to find \" + say_name, self.voice)\n self.sh.say(\"Please wait for a moment\", self.voice)\n self.is_pub_destination = True\n elif \"dining\" in output:\n self.destination = 'diningroom'\n self.sh.say(\"I will go to the dining room to find \" + say_name, self.voice)\n self.sh.say(\"Please wait for a moment\", self.voice)\n self.is_pub_destination = True\n elif \"kitchen\" in output:\n self.destination = 'kitchen'\n self.sh.say(\"I will go to the kitchen to find \" + say_name, self.voice)\n self.sh.say(\"Please wait for a moment\", self.voice)\n self.is_pub_destination = True\n else:\n 
self.sh.say(\"I did not hear your command\", self.voice)\n self.sh.say('please ask me again', self.voice)\n \n print('[INFO] destination is ', self.destination)\n\n\n def cleanup(self):\n self.sh.say(\"I have finished the task\", self.voice)\n\nif __name__ == '__main__':\n rospy.init_node(\"fmm_control\", anonymous=True)\n ctrl = find_my_mates()\n rospy.spin()\n","sub_path":"find_my_mates/src/fmm_control.py","file_name":"fmm_control.py","file_ext":"py","file_size_in_byte":9791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"346271065","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n YouTube downloading script\n\n\"\"\"\nimport sys\nimport time\nfrom colorama import init, Fore\nimport yt_youtube\nimport yt_files\ninit(autoreset=True)\nTERM: int = 79 # terminal window width\n\n\ndef main():\n \"\"\"\n\n Main program flow\n\n \"\"\"\n print()\n print(' ' * 32 + Fore.RED + '[>]' + Fore.RESET + ' YouTube' + ' ' * 36)\n print(' ' * 30 + 'downloading script' + ' ' * 31)\n print(str('-' * 23).center(TERM))\n\n links = yt_files.parse_links()\n\n if not links:\n print(Fore.LIGHTRED_EX + f'Nothing to download.'.center(TERM))\n sys.exit()\n\n print(f'Links to download: {len(links)}'.center(TERM))\n\n time_started = time.time()\n local_time = time.strftime('%H:%M', time.localtime())\n\n print()\n print(Fore.YELLOW + f'{yt_files.SAVE_PATH}'.center(TERM))\n print(Fore.CYAN + f'(Start at {local_time})'.center(TERM, '-'))\n\n yt_youtube.download_all(links)\n\n minutes = int((time.time() - time_started) / 60)\n local_time = time.strftime('%H:%M', time.localtime())\n\n if minutes:\n print(Fore.CYAN\n + f'(Finish at {local_time}, {minutes} minutes spent)'.center(TERM, '-'))\n else:\n print(Fore.CYAN\n + f'(Finish at {local_time})'.center(TERM, '-'))\n\n if yt_youtube.TOTAL_FILES > 0 and yt_youtube.TOTAL_MBS > 0:\n sfiles = str(yt_youtube.TOTAL_FILES)\n smbs = str(round(yt_youtube.TOTAL_MBS, 2))\n print(Fore.GREEN + f'Download complete. 
New files: {sfiles} ({smbs} mb)'.center(TERM))\n else:\n print(Fore.YELLOW + 'No new files were downloaded.'.center(TERM))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"343618477","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport argparse\nimport json\nimport os\nimport re\nimport sys\nimport types\nimport unittest\n\nimport requests\n\n\nCOMMENT_TOKEN = '#'\nNAME_TOKEN = '**'\nCOMMAND_TOKEN = '>'\nSETUP_TOKEN = 'setup'\n\n\nclass Parser(object):\n\n def __init__(self, path):\n self.cls_name = 'Test{}'.format(os.path.basename(path).split('.')[0].capitalize())\n with open(path) as f:\n self.dsl = f.read().split('\\n')\n\n self.variables = {}\n self.cls_methods = {}\n\n def build(self):\n self._parse()\n\n self.cls = type(self.cls_name, (DSLTestCase,), {})\n for k, v in self.cls_methods.items():\n cmd = v[0][len(COMMAND_TOKEN):].strip().split(' ')\n for offset, item in enumerate(cmd):\n item = re.sub(r'\\$\\w+', self._replace_vars, item)\n cmd[offset] = item\n expected = v[1:]\n func = self._build_test(cmd, expected)\n setattr(self.cls, k, func)\n\n return self.cls\n\n def _replace_vars(self, item):\n key = item.group(0)[1:]\n if key in self.variables:\n return self.variables[key]\n else:\n print('\"{}\" has not been set'.format(key))\n sys.exit(1)\n\n\n def _build_test(self, *args, **kwargs):\n return lambda s: s.run_dsl_test(*args, **kwargs)\n\n def _parse(self):\n test_name = None\n test_line_num = 1\n test_lines = []\n\n for line in self.dsl:\n line = line.strip()\n if not line or line.startswith(COMMENT_TOKEN):\n continue\n elif line.startswith(NAME_TOKEN):\n self._add_test_line(test_name, test_lines)\n test_name = line[len(NAME_TOKEN):].strip()\n test_lines = []\n else:\n test_lines.append(line)\n self._add_test_line(test_name, test_lines)\n\n def _add_test_line(self, test_name, test_lines):\n if not test_name:\n return\n elif test_name == SETUP_TOKEN:\n for var in test_lines:\n name, value = var.split('=')\n self.variables[name.strip()] = value.strip()\n else:\n func_name = 'test_{}'.format(test_name.replace(' ', '_'))\n self.cls_methods[func_name] = test_lines\n\n\nclass DSLTestCase(unittest.TestCase):\n\n def setUp(self):\n super(DSLTestCase, self).setUp()\n\n def run_dsl_test(self, cmd, expected):\n method = cmd[0].strip().upper()\n url = cmd[1].strip()\n params = {p.split('=')[0].strip(): p.split('=')[1].strip() for p in cmd[2:]}\n expected = re.sub(':\\s*', r':', '\\n'.join(expected))\n\n result = request(method, url, **params)\n m = re.search(expected, result)\n msg = 'AssertionError: %r doest not match %r' % (expected, result)\n self.assertTrue(m, msg)\n\n\ndef request(method, url, **kwargs):\n if method == 'GET':\n r = requests.get(url, params=kwargs)\n elif method == 'POST':\n r = requests.post(url, data=kwargs)\n else:\n print(':-)')\n sys.exit(1)\n ret = 'status: {}\\n{}'.format(r.status_code, r.text)\n result = re.sub(':\\s*', r':', ret)\n return result\n\n\ndef commandline():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--test-file', help='the test file path', metavar='FILE')\n parser.add_argument('-d', '--debug', help='debug mode', action='store_false')\n return parser.parse_args()\n\n\ndef main():\n args = commandline()\n if not args.test_file:\n path = os.path.abspath(__file__)\n f = os.path.join(os.path.dirname(path), 'api.test')\n else:\n f = 
os.path.abspath(args.test_file)\n\n print('Staring test in {}'.format(f))\n parser = Parser(f)\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n suite.addTests(loader.loadTestsFromTestCase(parser.build()))\n\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dsl.py","file_name":"dsl.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"580115950","text":"# -*- coding:UTF-8 -*-\n\n# 定义停车场的类\nfrom park_position import Park_position\nfrom park_order import Park_order\n\n\nclass Park():\n\n def __init__(self, p_address, p_name, max_num, p_model, p_size, p_limit, place_kind):\n self.p_address = p_address\n self.p_name = p_name\n self.max_num = max_num\n self.p_model = p_model\n self.p_size = p_size\n self.p_limit = p_limit\n self.place_kind = place_kind\n\n def produce_record(self):\n print('停车场根据车辆信息产生停车记录')\n\n def update_record(self):\n print('停车场根据停车位置修改停车记录')\n\n def produce_order_record(self, name, park_time, pay_way, leave_time, pay_status):\n count = (park_time/3600)*2\n print('停车场根据停车时间生成订单')\n print(' 订单编号:000001')\n print(' 应付金额:' + str(count) + '元')\n print(' 支付方式:' + pay_way)\n print(' 支付时间:' + leave_time)\n print(' 支付人:' + name)\n print(' 支付状态:' + pay_status)\n\n","sub_path":"park_depot.py","file_name":"park_depot.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"94035577","text":"# Imports and options\nimport pandas as pd\nimport numpy as np\nimport os\n\npd.options.display.width = 0\n\n# Set base dirs\nmapper_base = r'resources'\nbase = r'data'\n\n# ------------------------------------ read time_mapper ------------------------------------ #\nfilename = r'time_mapper.csv'\nfullpath = os.path.join(mapper_base, filename)\ntime_mapper = pd.read_csv(fullpath)\n\n# ------------------------------------ read line_mapper ------------------------------------ #\nfilename = r'line_mapper.csv'\nfullpath = os.path.join(mapper_base, filename)\nline_mapper = pd.read_csv(fullpath)\nline_mapper = line_mapper.assign(device_number=line_mapper['device_number'].str.split(','))\n\n# ------------------------------------ read and format file 1 ------------------------------------ #\n# Read data\nfilename = r'sensor_joined.xlsx'\nfullpath = os.path.join(base, filename)\n\ndtypes = {'sensor_id': np.int32,\n 'lVarId': np.int32,\n 'HMI': np.int32,\n 'op_area_name': str,\n 'op_area_number': np.int32,\n 'device_kind': str,\n 'device_number': np.int32,\n 'sensor_tagname': str,\n 'sensor_name': str,\n 'sensor_name_edited': str,\n 'desc': str,\n 'out_pipes': str,\n 'in_pipes': str}\n\na = pd.read_excel(io=fullpath,\n dtypes=dtypes) \\\n .rename({'id': 'sensor_id'}, axis=1)\n\n# Coerce non numerics from columns\na['device_number'] = pd.to_numeric(a['device_number'], errors='coerce')\n\n# Concat xx(device_kind) to op_area_name and number\ncond = a['device_kind'] == 'xx'\na.loc[cond, 'device_kind'] = a['op_area_name'][cond] + a['op_area_number'][cond].astype(str) + 'xx'\n\n# ------------------------------------ read and format file 2 ------------------------------------ #\n# Read data\nfilename = r'tables_cottage_stages.csv'\nfullpath = os.path.join(base, filename)\n\ndtypes = {'batchid': int,\n 'stageid': int,\n 'stage': str}\n\ndate_cols = ['start', 'finish']\n\nb = pd.read_csv(filepath_or_buffer=fullpath,\n parse_dates=date_cols) \\\n 
.rename({'stageid': 'device_number',\n 'stage': 'device_kind',\n 'id': 'read_id',\n 'batchid': 'batch_id'}, axis=1)\n\n# Convert device_number non numeric to Null\n# Lower strings\nb = b.assign(device_number=pd.to_numeric(b['device_number'], errors='coerce'),\n device_kind=b['device_kind'].str.lower(),\n delta=((b['finish'] - b['start']) / np.timedelta64(1, 'h'))\n )\n\nb = b.assign(delta_q_bins=b.groupby(['device_kind'])['delta'].transform(\n lambda x: pd.qcut(x, q=10, precision=7, labels=False, duplicates='drop'))\n)\n\n# Remove 'packing' category\n# Set some dates to Null\ncond1 = (b['device_kind'] != 'packing')\nb.loc[:, :] = b[cond1]\n\ncond2 = (b['start'].dt.year != 2019) \\\n | (b['finish'].dt.year != 2019) \\\n | (b['finish'] == pd.Timestamp('2019-02-12 12:46:00')) \\\n | (b['finish'] == pd.Timestamp('2019-02-12 12:47:00')) \\\n | (b['delta'] < 0) \\\n | (b['delta'] > 15)\n\nb.loc[cond2, ['finish', 'start']] = None\n\n# Groupby in order to remove batches that have multiple device kinds\nb = b.groupby(['batch_id', 'device_kind']).agg({'device_number': 'max',\n 'start': 'max',\n 'finish': 'max'}) \\\n .reset_index()\n\n# -------------------------------------------------------- #\n# Pivot table by batch number, each line would thus contain one batch, and each column a diff device_kind + start\n# and finish columns\n# -------------------------------------------------------- #\nxxx = b.pivot_table(index='batch_id',\n columns='device_kind',\n values=['start', 'finish', 'device_number'],\n aggfunc='first')\n\n# Flatten column multiindex\nxxx.columns = ['_'.join(col).strip() for col in xxx.columns]\n\n# Do the same for the pipes\n# make an empty pipes df and concat with previous df\npipes = ['cc1xx', 'ps1xx']\npipes = ['start_' + i for i in pipes] + ['finish_' + i for i in pipes] + ['device_number_' + i for i in pipes]\n\npipes = pd.DataFrame(columns=pipes,\n data=np.empty((xxx.shape[0], pipes.__len__())),\n index=xxx.index)\npipes[:] = np.nan\n\nxxx = pd.concat([xxx, pipes], axis=1).reset_index()\n\n# Set the start and finish times for each pipe by the logic in the 'time_mapper' file\nfor _, r in time_mapper.iterrows():\n xxx[r['to']] = xxx[r['from']]\n\n# Set line numbers for each pipe by the logic in the 'line_mapper' file\nfor _, r in line_mapper.iterrows():\n line_list = r['device_number']\n\n device_columns = r['col_a']\n line_column = r['col_b']\n\n cond = xxx[r['col_a']].isin(line_list)\n xxx.loc[cond, line_column] = r['line_number']\n\n# Convert back to the long form\nstubnames = [\"device_number\", \"start\", \"finish\"]\nxxx = pd.wide_to_long(df=xxx, stubnames=stubnames, i=\"batch_id\", j=\"device_kind\", sep='_', suffix='.*').reset_index()\n\n# ------------------------------------ merge files ------------------------------------ #\nmerged = a.merge(xxx, left_on=['device_kind', 'device_number'], right_on=['device_kind', 'device_number'])\nmerged = merged.sort_values(['batch_id', 'device_kind'], ascending=[True, True]).reset_index()\nmerged = merged.drop(['sensor_id','out_pipes','in_pipes','index'], axis = 1)\n\n# ----------------------------------- find sensors that did not match with batch ----------------------------------- #\nm2 = a.merge(xxx, how='left', left_on=['device_kind', 'device_number'], right_on=['device_kind', 'device_number'])\ncond = m2['batch_id'].isna()\n\n# 
m2[cond].to_clipboard()\n","sub_path":"add_the_lines_new.py","file_name":"add_the_lines_new.py","file_ext":"py","file_size_in_byte":5659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"247911702","text":"import RPi.GPIO as GPIO\nimport time\n\n#Declare pin numbering system\nGPIO.setmode(GPIO.BCM)\n#Disable warning messages\nGPIO.setwarnings(False)\n\n#Setting up the pins we will use\nGPIO.setup(4,GPIO.IN) #HEK\nGPIO.setup(17,GPIO.IN) #DEUR\nGPIO.setup(21,GPIO.OUT) #GROEN\nGPIO.setup(20,GPIO.OUT) #GEEL\nGPIO.setup(12,GPIO.OUT) #ROOD\n\n#Run the entire program in an infinite loop\nwhile True:\n #Check welke knop wordt ingedrukt\n if (GPIO.input(4) == True):\n #Als het hek wordt geopend:\n print(\"Hek Open\")\n #Gele LED brand 10 seconden\n GPIO.output(20,GPIO.HIGH)\n time.sleep(10)\n GPIO.output(20,GPIO.LOW)\n #Controleer of code binnen 10 seconden wordt ingevoerd\n if (GPIO.input(17) == True):\n #Binnen 10 seconden deur geopend\n code = raw_input(\"Voer pincode in: \")\n if code == '0000':\n #Correcte code ingevoerd\n #Groene LED brand 5 seconden\n GPIO.output(21, GPIO.HIGH)\n time.sleep(5)\n GPIO.output(21,GPIO.LOW)\n #Terug naar begin loop\n else:\n #Verkeerde code ingevoerd\n #Rode LED blinkt 5 seconden\n for i in range(5):\n GPIO.output(12,GPIO.HIGH)\n time.sleep(1)\n GPIO.output(12, GPIO.LOW)\n time.sleep(1)\n #Terug naar begin loop\n else:\n #Niet gelukt om binnen 10 seconden code in te voeren\n #Rode LED blinkt 5 seconden\n for i in range(5):\n GPIO.output(12,GPIO.HIGH)\n time.sleep(1)\n GPIO.output(12,GPIO.LOW)\n time.sleep(1)\n #Terug naar begin loop\n elif (GPIO.input(17) == True):\n #Deur geopend zonder hek te openen\n #Rode LED blinkt 5 seconden\n for i in range(5):\n GPIO.output(12,GPIO.HIGH)\n time.sleep(1)\n GPIO.output(12,GPIO.LOW)\n time.sleep(1)\n #Terug naar begin loop\n else:\n print(\"Not Pressed\")\n time.sleep(1)\n\n\n","sub_path":"Systeem.py","file_name":"Systeem.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"425564707","text":"import pandas as pd\nimport numpy as np\nimport unionfind as uf\nimport itertools\nimport matplotlib as mpl\nimport seaborn as sns\nimport sys\nfrom sklearn.metrics import adjusted_rand_score\nfrom scipy.spatial.distance import squareform, pdist\nfrom scipy.spatial import distance_matrix\n\ndef find(q, points):\n s = q\n x = points.loc[q]\n while x['parent'] != q:\n r = q\n q = x['parent']\n points.loc[r, 'parent'] = q\n x = points.loc[q]\n points.loc[s, 'cluster'] = points.loc[q, 'cluster']\n return q\n\ndef union(p, q, points):\n s = find(q, points)\n points.loc[s, 'parent'] = p\n\ndef slink(file, kMin, kMax):\n df=pd.read_csv(file, sep='\\t',header=0)\n real_clusters = pd.read_csv(file.replace('.txt', 'Real.clu'), sep='\\t',header=None)\n points = df.iloc[:, :3]\n points['cluster'] = np.arange(points.shape[0])\n points['n'] = 1\n points['parent'] = np.arange(points.shape[0])\n clusters = points[['sample_label', 'cluster']]\n k = points.shape[0]\n dists = pd.DataFrame(squareform(pdist(df.iloc[:, 1:3])), columns=clusters.cluster.unique(), index=clusters.cluster.unique())\n for i in range(len(dists)): \n dists.iat[i, i] = np.nan\n\n\n for i in range(0, points.shape[0] - 1):\n if k <= int(kMax) and k >= int(kMin):\n [find(x, points) for x in np.arange(points.shape[0])]\n print ('K = %d' % k)\n l_dict = dict(zip(set(points['cluster']), range(len(points['cluster']))))\n points = 
points.assign(normalized_cluster = [l_dict[x] for x in points['cluster']])\n print('ARI (Hubert-Arabie) = %lf' % adjusted_rand_score(real_clusters.values[:, 1], points['normalized_cluster'].values))\n points[['sample_label', 'normalized_cluster']].to_csv(file.split('.txt')[0] + '-slink' + str(k) + '.clu', sep='\\t', index=False, header=None)\n figname = 'plots/slink/' + file.split('.txt')[0].split('/')[1] + '-k-' + str(k) + '.png'\n sns.pairplot(x_vars=[\"d1\"], y_vars=[\"d2\"], data=points, hue=\"normalized_cluster\", height=5).savefig(figname)\n\n\n\n k = k - 1\n\n p = dists.min().idxmin()\n q = dists.idxmin().loc[p]\n r = min(p,q)\n s = max(p,q)\n union(r, s, points)\n points.loc[r, 'n'] = points.loc[r, 'n'] + points.loc[s, 'n']\n dists[r] = pd.concat([dists[r], dists[s]], axis=1).min(axis=1)\n dists.loc[r] = pd.concat([dists.loc[r], dists.loc[s]], axis=1).min(axis=1)\n dists = dists.drop(s)\n dists = dists.drop(s, axis=1)\n dists.loc[r,r] = np.nan\n\n return points\n\nif __name__ == \"__main__\":\n slink(sys.argv[1], sys.argv[2], sys.argv[3])","sub_path":"slink.py","file_name":"slink.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"358841061","text":"\r\nfrom dbpostgres import DbHelper\r\nfrom NotificationSheet import NotificationSheet\r\n\r\nclass NotificationTable:\r\n\r\n def __init__(self, table_id):\r\n self.table_id = table_id\r\n db = DbHelper()\r\n sheets_db = db.execute_select(\"SELECT * FROM notificator.google_sheets WHERE table_id='{}'\".format(table_id))\r\n self.sheets = []\r\n for sheet in sheets_db:\r\n self.sheets.append(NotificationSheet(sheet[1],sheet[2],sheet[3],sheet[4],sheet[5],sheet[6],sheet[7]))\r\n\r\n \r\n\r\n\r\n","sub_path":"bot/NotificationTable.py","file_name":"NotificationTable.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"44869757","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport timeit\n\ndef fak1(n):\n res = 1\n for i in range(2, n+1):\n res *= i\n return res\n \ndef fak2(n):\n if n > 0:\n return fak2(n-1)*n\n else:\n return 1\n\nt1 = timeit.Timer(\"fak1(50)\", \"from __main__ import fak1\")\nt2 = timeit.Timer(\"fak2(50)\", \"from __main__ import fak2\")\nprint(\"Iterativ: \", t1.timeit())\nprint(\"Rekursiv: \", t2.timeit())\nprint(\"Iterativ: \", min(t1.repeat(100, 10000)))\nprint(\"Rekursiv: \", min(t2.repeat(100, 10000)))\n","sub_path":"00_Original/35_Debugging_und_Qualitaetssicherung/Analyse_des_Laufzeitverhaltens/beispiel_timeit.py","file_name":"beispiel_timeit.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"423515799","text":"a,b,n=list(map(int,input().split()))\nflag=0\na=str(a)\nfor j in range(10):\n\ttemp=str(a)+str(j)\n\tif int(temp)%b==0:\n\t\ta=temp\n\t\tbreak\nelse:\n\tflag=1\n\nif flag:\n\tprint(-1)\nelse:\n\tprint(a+\"0\"*(n-1))","sub_path":"Ladder_11(Less_1300)/A_Adding_Digits.py","file_name":"A_Adding_Digits.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"337363718","text":"\nclass Event:\n def __init__(self):\n self.events = dict()\n self.evID = 0\n\n def addNewEvent(self, eventYear, eventMonth, eventDay,\n eventTime, eventType, eventDuration,\n eventDescription):\n event = [eventYear, eventMonth, eventDay,\n eventTime, 
eventType,eventDuration,\n eventDescription]\n self.events[self.evID] = event\n self.evID += 1\n\n\n def printEventsbyMonth(self, eventYear, eventMonth):\n if len(self.events) == 0:\n print('There are no events!\\n')\n return\n print('{0} {1}'.format('Events for ', self.getMonthNameByNumber\n (\n month=eventMonth,\n year=eventYear\n )).center(30, '~'))\n for key, evlist in self.events.items():\n if isinstance(evlist, list):\n if evlist[0] == eventYear and evlist[1] == eventMonth:\n print('Event ID-{0}: {1} on {2}/{3}/{4} at {5} with a duration of {6}.'\n .format(key, evlist[4],\n evlist[0], evlist[1],\n evlist[2], evlist[3],\n evlist[5]\n ))\n print('Description: {0}'.format(evlist[6]))\n print('\\n')\n\n\n def printEventsbyDay(self, eventYear, eventMonth, eventDay):\n if len(self.events) == 0:\n print('There are no events!\\n')\n return\n print('{0} {1} {2}'.format('Events for ', self.getMonthNameByNumber\n (\n month=eventMonth, year=eventYear\n ),\n eventDay).center(30, '~'))\n for key, evlist in self.events.items():\n if isinstance(evlist, list):\n if evlist[0] == eventYear and evlist[1] == eventMonth and evlist[2] == eventDay:\n print('Event ID-{0}: {1} on {2}/{3}/{4} at {5} with a duration of {6}.'\n .format(key, evlist[4],\n evlist[0], evlist[1],\n evlist[2], evlist[3],\n evlist[5]))\n print('Description: {0}'.format(evlist[6]))\n print('\\n')\n\n def removeEvent(self, eventYear, eventMonth, eventDay, eventTime, eventDuration):\n if len(self.events) == 0:\n print('There are no events!\\n')\n return\n print('Removing Event...')\n for key, evlist in self.events.items():\n if isinstance(evlist, list):\n if evlist[0] == eventYear and evlist[1] == eventMonth and evlist[2] == eventDay:\n if evlist[3] == eventTime and evlist[5] == eventDuration:\n self.events.pop(key)\n print('Removed Successfully!')\n break\n\n def removeEventByID(self, eventID):\n if len(self.events) == 0:\n print('There are no events!\\n')\n return\n if self.events.get(eventID) != None:\n self.events.pop(eventID)\n else:\n print('Event ID Incorrect or doesn\\'t exist')\n\n # Used for updating event in the user interface\n def getEventByID(self, eventID):\n if self.events.get(eventID) == None: return 'Event ID incorrect or doesn\\'t exist!'\n return list(self.events.get(eventID))\n\n \n def updateEvent(self, eventID, evYear, evMonth, evDay, evTime, evType, evDuration, evDescription):\n if len(self.events) == 0:\n print('There are no events!\\n')\n return \n self.events[eventID] = [evYear, evMonth, evDay, evTime, evType, evDuration, evDescription]\n\n def updateEventByDay(self, eventYear, eventMonth, eventDay, eventTime, eventDuration):\n if len(self.events) == 0:\n print('There are no events!\\n')\n return\n for key in self.events.keys():\n if isinstance(self.events.get(key), list):\n evList = list(self.events.get(key))\n if evList[0] == eventYear and evList[1] == eventMonth and evList[2] == eventDay:\n self.printEventsbyDay(eventYear,eventMonth,eventDay)\n eventID = int(input('Enter event ID: '))\n if key+1 == eventID:\n evList[0] = int(input('Update year: '))\n evList[1] = int(input('Update month: '))\n evList[2] = int(input('Update day: '))\n evList[3] = str(input('Update time: '))\n evList[4] = str(input('Update type: '))\n evList[5] = str(input('Update duration: '))\n evList[6] = str(input('Update description: '))\n\n def getMonthNameByNumber(self, year, month):\n from Calendar import Calendar\n cal = Calendar()\n cal.printMonth(month= month, year= year)\n return cal.months[month]\n \n #Methods to print all events 
dates in the format YYYY/MM/DD\n def getAllEventsDatesByDay(self):\n if len(self.events) == 0:\n print('There are no events!\\n')\n return\n print('All dates with events YYYY/MM/DD')\n months = set()\n for key, evlist in self.events.items():\n months.add((evlist[0], evlist[1], evlist[2]))\n for year, month, day in months:\n print('>> {0}/{1}/{2}\\n'.format(year, month, day) )\n\n def getAllEventsDatesByMonth(self):\n if len(self.events) == 0:\n print('There are no events!\\n')\n return\n print('All dates with events YYYY/MM/DD')\n months = set()\n for key, evlist in self.events.items():\n months.add((evlist[0], evlist[1]))\n for year, month in months:\n print('>> {0}/{1}\\n'.format(year, month))\n\n #Method to print all the events of the application\n def printAllEvents(self):\n if len(self.events) == 0:\n print('There are no events!\\n')\n return\n print('All Events')\n for key, evlist in self.events.items():\n print('Event ID-{0}: {1} on {2}/{3}/{4} at {5} with a duration of {6}.'\n .format(key, evlist[4],\n evlist[0], evlist[1],\n evlist[2], evlist[3],\n evlist[5]))\n print('Description: {0}'.format(evlist[6]))\n print('\\n')\n \n def isEmpty(self):\n isEmpty = False\n if len(self.events) != 0: \n isEmpty = False \n else: \n isEmpty = True\n return isEmpty\n \n def getEvents(self):\n return self.events","sub_path":"Event.py","file_name":"Event.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"360629458","text":"'''\n parse pascal VOC xml\n\n @author neucrack\n @license MIT © 2020 neucrack\n'''\n\n\n\n\nimport re\n\n\ndef decode_pascal_voc_xml(xml_path, ordered = False):\n '''\n @ordered parse labelimg ordered xml by RE, or will use xml parser\n @return bool, info\n res = {\n \"filename\": <filename>,\n \"path\": <path>,\n \"width\": <width>,\n \"height\": <height>,\n \"depth\": <depth>,\n \"bboxes\": [(xmin, ymin, xmax, ymax, label, difficult)]\n }\n '''\n if ordered:\n with open(xml_path) as f:\n xml = f.read()\n try:\n rule = \"<filename>(.*)</filename>.*<path>(.*)</path>.*<size>.*<width>(.*)</width>.*<height>(.*)</height>.*<depth>(.*)</depth>.*\"\n match = re.findall(rule, xml, re.MULTILINE|re.DOTALL)\n if len(match) < 1:\n return False, \"decode error\"\n res = {\n \"filename\": match[0][0].replace(\"\\\\\", \"/\"),\n \"path\": match[0][1].replace(\"\\\\\", \"/\"),\n \"width\": int(match[0][2]),\n \"height\": int(match[0][3]),\n \"depth\": int(match[0][4]),\n \"bboxes\": []\n }\n rule = \"<object>.*?<name>(.*?)</name>.*?<difficult>(.*?)</difficult>.*?<bndbox>.*?<xmin>(.*?)</xmin>.*?<ymin>(.*?)</ymin>.*?<xmax>(.*?)</xmax>.*?<ymax>(.*?)</ymax>.*?</bndbox>.*?</object>\"\n match = re.findall(rule, xml, re.MULTILINE|re.DOTALL)\n if len(match) < 1:\n return False, \"no object in this image\"\n for bbox in match:\n bbox = [int(bbox[2]), int(bbox[3]), int(bbox[4]), int(bbox[5]), bbox[0], int(bbox[1])]\n res[\"bboxes\"].append(bbox)\n except Exception as e:\n return False, \"decode error: {}\".format(e)\n return True, res\n try:\n from xml.etree.ElementTree import parse\n tree = parse(xml_path)\n root = tree.getroot()\n filename = root.find(\"filename\").text\n path = root.find(\"path\")\n width = -1\n height = -1\n depth = -1\n for elem in tree.iter():\n if \"width\" in elem.tag:\n width = int(elem.text)\n elif \"height\" in elem.tag:\n height = int(elem.text)\n elif \"depth\" in elem.tag:\n depth = int(elem.text)\n obj_tags = root.findall(\"object\")\n res = {\n \"filename\": filename,\n \"path\": filename if path is None else path.text,\n \"width\": width,\n \"height\": height,\n \"depth\": depth,\n \"bboxes\": []\n }\n res[\"filename\"] = res[\"filename\"].replace(\"\\\\\", \"/\")\n res[\"path\"] = res[\"path\"].replace(\"\\\\\", 
\"/\")\n for t in obj_tags:\n name = t.find(\"name\").text\n box_tag = t.find(\"bndbox\")\n difficult = int(t.find(\"difficult\").text)\n x1 = int(float(box_tag.find(\"xmin\").text))\n y1 = int(float(box_tag.find(\"ymin\").text))\n x2 = int(float(box_tag.find(\"xmax\").text))\n y2 = int(float(box_tag.find(\"ymax\").text))\n bbox = [x1, y1, x2, y2, name, difficult ]\n res[\"bboxes\"].append(bbox)\n return True, res\n except Exception as e:\n return False, \"decode error: {}\".format(e)\n","sub_path":"train/detector/parse_pascal_voc_xml.py","file_name":"parse_pascal_voc_xml.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"605608181","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom typing import Any\n\nfrom argparse import ArgumentParser\nfrom optparse import make_option\n\nfrom django.core.management.base import BaseCommand, CommandParser\n\nfrom zerver.lib.actions import bulk_remove_subscriptions\nfrom zerver.models import Realm, UserProfile, get_realm, get_stream, \\\n get_user_profile_by_email\n\nclass Command(BaseCommand):\n help = \"\"\"Remove some or all users in a realm from a stream.\"\"\"\n\n def add_arguments(self, parser):\n # type: (CommandParser) -> None\n parser.add_argument('-r', '--realm',\n dest='string_id',\n type=str,\n help='The subdomain or string_id of the realm in which you are '\n 'removing people.')\n\n parser.add_argument('-s', '--stream',\n dest='stream',\n type=str,\n help='A stream name.')\n\n parser.add_argument('-u', '--users',\n dest='users',\n type=str,\n help='A comma-separated list of email addresses.')\n\n parser.add_argument('-a', '--all-users',\n dest='all_users',\n action=\"store_true\",\n default=False,\n help='Remove all users in this realm from this stream.')\n\n def handle(self, **options):\n # type: (*Any, **Any) -> None\n if options[\"string_id\"] is None or options[\"stream\"] is None or \\\n (options[\"users\"] is None and options[\"all_users\"] is None):\n self.print_help(\"./manage.py\", \"remove_users_from_stream\")\n exit(1)\n\n realm = get_realm(options[\"string_id\"])\n stream_name = options[\"stream\"].strip()\n stream = get_stream(stream_name, realm)\n\n if options[\"all_users\"]:\n user_profiles = UserProfile.objects.filter(realm=realm)\n else:\n emails = set([email.strip() for email in options[\"users\"].split(\",\")])\n user_profiles = []\n for email in emails:\n user_profiles.append(get_user_profile_by_email(email))\n\n result = bulk_remove_subscriptions(user_profiles, [stream])\n not_subscribed = result[1]\n not_subscribed_users = {tup[0] for tup in not_subscribed}\n\n for user_profile in user_profiles:\n if user_profile in not_subscribed_users:\n print(\"%s was not subscribed\" % (user_profile.email,))\n else:\n print(\"Removed %s from %s\" % (user_profile.email, stream_name))\n","sub_path":"zerver/management/commands/remove_users_from_stream.py","file_name":"remove_users_from_stream.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"130501633","text":"'''\nThis program started with a thread on the pythonista message boards here:\n\thttps://forum.omz-software.com/topic/2066/python-opengles\n\t\nbut then I found the code here:\n\thttps://github.com/Cethric/OpenGLES-Pythonista/blob/3c2332dcc31a091c0388f258e6ae4f7bbce89445/Util/Shader.py\n\nthis forum post on pythonista helped with 
glCreateShader:\n\thttps://forum.omz-software.com/topic/4650/trouble-with-opengl-ctypes-and-pythonista\n\t\nDraw a simple triangle\n'''\n#!python2\nfrom ctypes import *\nfrom objc_util import *\nimport time\nimport colorsys\nimport sys\nfrom GLConstants import *\n\nGLKView = ObjCClass('GLKView')\nGLKViewController = ObjCClass('GLKViewController')\nUINavigationController = ObjCClass('UINavigationController')\nUIBarButtonItem = ObjCClass('UIBarButtonItem')\nEAGLContext = ObjCClass('EAGLContext')\n\n#functions defined here we should put this in a different file\n\n#glClearColor\nglClearColor = c.glClearColor\nglClearColor.restype = None\nglClearColor.argtypes = [GLfloat, GLfloat, GLfloat, GLfloat]\n\n#glClear\nglClear = c.glClear\nglClear.restype = None\nglClear.argtypes = [GLbitfield]\n\n#glCreateShader\nglCreateShader = c.glCreateShader\nglCreateShader.restype = GLuint\nglCreateShader.argtypes = [GLenum]\n\n#glShaderSource\nglShaderSource = c.glShaderSource\nglShaderSource.restype = None\nglShaderSource.argtypes = [GLuint, GLsizei, POINTER(c_char_p), POINTER(GLint)]\n\n#glCompileShader\nglCompileShader = c.glCompileShader\nglCompileShader.restype = None\nglCompileShader.argtypes = [GLuint]\n\n#glGetShaderiv\nglGetShaderiv = c.glGetShaderiv\nglGetShaderiv.restype = None\nglGetShaderiv.argtypes = [GLuint, GLenum, POINTER(GLint)]\n\n#glGetSahderInfoLog(shader, N, byref(log_length), byref(info_log))\nglGetShaderInfoLog = c.glGetShaderInfoLog\nglGetShaderInfoLog.restype = None\nglGetShaderInfoLog.argtypes = [GLuint, GLsizei, POINTER(GLsizei), POINTER(GLchar)]\n\n#constants are defined here\nGL_COLOR_BUFFER_BIT = 0x00004000\nGL_COMPILE_STATUS = 0x8B81\nGL_VERTEX_SHADER = 0x8B31\nGL_FRAGMENT_SHADER = 0x8B30\nGL_INFO_LOG_LENGTH = 0x8B84\nGL_LINK_STATUS = 0x8B82\nGL_FLOAT = 0x1406\nGL_FALSE = 0x0000\nGL_TRIANGLES = 0x0004\n\ndef load_shader(shader_type, shader_source):\n\tshader = c_uint32(0)\n\tcompiled = c_int(0)\n\t\n\tshader = glCreateShader(shader_type)\n\tif shader == 0:\n\t\tprint(\"error in load_shader\")\n\t\tsys.exit(200)\n\t\n\t# load the shader source\n\tglShaderSource(shader, 1, byref(cast(shader_source, c_char_p)), None)\n\n\t# compile the shader\n\tglCompileShader(shader)\n\t# check the compile status\n\tglGetShaderiv(shader, GL_COMPILE_STATUS, byref(compiled))\n\tif compiled.value != 1:\n\t\tprint(\"load shader failed to compile code %d\" % (compiled.value))\n\t\tinfo_length = c_int32(0)\n\t\tglGetShaderiv(shader, GL_INFO_LOG_LENGTH, byref(info_length))\n\t\tif info_length.value > 1:\n\t\t\tN = 1024\n\t\t\tinfo_log = (GLchar * N)()\n\t\t\tlog_length = c_int()\n\t\t\tglGetShaderInfoLog(shader, N, byref(log_length), cast(info_log, c_char_p))\n\t\t\tprint(\"error from OpenGL compiler: %s\" % info_log.value)\n\t\treturn 0\n\treturn shader\n\t\ndef glkView_drawInRect_(_self, _cmd, view, rect):\n glClearColor(1.0, 0.0, 0.0, 1.0) # red\n #glClearColor(0.0, 1.0, 0.0, 1.0) # green\n #glClearColor(0.0, 0.0, 1.0, 1.0) # blue\n glClear(GL_COLOR_BUFFER_BIT)\nMyGLViewDelegate = create_objc_class('MyGLViewDelegate', methods=[glkView_drawInRect_], protocols=['GLKViewDelegate'])\n\ndef dismiss(_self, _cmd):\n print(\"dismiss closing down\")\n self = ObjCInstance(_self)\n self.view().delegate().release()\n self.view().setDelegate_(None)\n self.dismissViewControllerAnimated_completion_(True, None)\n\ndef viewDidLoad(_self, _cmd):\n print(\"viewDidLoad loading up MyGLViewController\")\n self = ObjCInstance(_self)\nMyGLViewController = create_objc_class('MyGLViewController', GLKViewController, 
methods=[dismiss, viewDidLoad])\n\n@on_main_thread\ndef main():\n context = EAGLContext.alloc().initWithAPI_(2).autorelease()\n glview = GLKView.alloc().initWithFrame_(((0, 0), (320, 320))).autorelease()\n delegate = MyGLViewDelegate.alloc().init()\n glview.setDelegate_(delegate)\n glview.setContext_(context)\n glview.setEnableSetNeedsDisplay_(False)\n glvc = MyGLViewController.alloc().initWithNibName_bundle_(None, None).autorelease()\n glvc.setTitle_('GLKit Demo Hello Triangle.py')\n glvc.setView_(glview)\n done_b = UIBarButtonItem.alloc().initWithTitle_style_target_action_('Done', 2, glvc, 'dismiss').autorelease()\n glvc.navigationItem().setRightBarButtonItem_(done_b)\n nav = UINavigationController.alloc().initWithRootViewController_(glvc)\n rootvc = UIApplication.sharedApplication().keyWindow().rootViewController()\n rootvc.presentModalViewController_animated_(nav, True)\n nav.release()\n vShaderStr = (b\"attribute vec4 vPosition;void main(){gl_Position = vPosition;}\")\n vertex_shader = load_shader(GL_VERTEX_SHADER, vShaderStr)\n fShaderStr = (b\"precision mediump float; void main(void){ gl_FragColor = vec4(0.0, 0.0, 0.0, 1.0);}\")\n fragment_shader = load_shader(GL_FRAGMENT_SHADER, fShaderStr)\n\nmain()","sub_path":"iPad/Triangle.py","file_name":"Triangle.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"457998788","text":"# -*- coding:utf-8 -*-\n# 根据导出结果,计算总框数。\nimport os\nimport json\n\n\nbaseDir1 = str(input(\"请输入第一个需要统计的文件夹路径:\"))\nbaseDir2 = str(input(\"请输入第二个需要统计的文件夹路径:\"))\nbaseDir3 = str(input(\"请输入第三个需要统计的文件夹路径:\"))\n\n\ndef get_all_files(baseDir):\n \"\"\"得到所有需要统计的文件\"\"\"\n # 列出当前目录下所有文件和文件夹\n files = os.listdir(baseDir)\n # 遍历当前目录下的所有文件\n count = 0\n for file in files:\n # 路径拼接\n abs_path = os.path.join(baseDir, file)\n # 打开abs_path所指向路径下所有遍历出来的文件\n with open(abs_path, \"r\", encoding='gbk') as f:\n tag_record = f.read()\n # json字符串转为json对象\n json_obj = json.loads(tag_record)\n # 提取数据,类型是一个列表\n json_arr = json_obj['markResult']['features']\n # 计算长度(框数)\n count += len(json_arr)\n return count\n\n\ndef calculate_boxes(count1, count2, count3):\n \"\"\"计算总框数\"\"\"\n count_sum = count1 + count2 + count3\n print(\"总框数是 %d 个\" % count_sum)\n\n\ndef main():\n count1 = get_all_files(baseDir1)\n count2 = get_all_files(baseDir2)\n count3 = get_all_files(baseDir3)\n calculate_boxes(count1, count2, count3)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"needDownload/commonTools/02_calculatelBoxes.py","file_name":"02_calculatelBoxes.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"44954570","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 28 15:19:28 2020\n\n@author: MARAT\n\"\"\"\n\nimport re\n\ntxt = \"The rain in Spain\"\nx = re.search(\"ai\", txt)\nprint(x) #this will print an object","sub_path":"lab_5/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"72278711","text":"#file stardust.py\n#author 凡尘(Kevin Brooks)\n#version 1.0\n#date 2020_08_04\n#github https://github.com/stardust-kevin\n#copyright Copyright (C) 2016, Stardust Studio, All Rights Reserved\n#license http://www.apache.org/licenses/LICENSE-2.0\n\n#导入基本功能cv2库\nimport cv2\nimport numpy as np\n\n\nfilename = 'board.jpg'\nimg = cv2.imread(filename)\ngray = 
cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\ngray = np.float32(gray)\n\ndst = cv2.cornerHarris(gray,2,3,0.1)\ndst = cv2.dilate(dst,None)\nimg[dst>0.01*dst.max()]=[0,0,255]\n\ncv2.imshow('dst',img)\nif cv2.waitKey(0) & 0xff == 27:\n cv2.destroyAllWindows()","sub_path":"资源/实例代码/8.5.1 Harris角点检测/stardust/stardust.py","file_name":"stardust.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"224627540","text":"import pandas as pd\nimport censusdata\nfrom params import *\nfrom tqdm import tqdm\nfrom ast import literal_eval\nimport yaml\nimport psycopg2 as pg2\nimport numpy as np\nimport re \n\ntry:\n with open('secrets.yaml', 'r') as f:\n # loads contents of secrets.yaml into a python dictionary\n secret_config = yaml.safe_load(f.read())\n db_params = secret_config['db']\n API_KEY = secret_config['web_resource']['api_key']\nexcept FileNotFoundError:\n print(\"Could not find secrets.yaml file. Proceeding without API_KEY. Database connection will not be established.\")\n db_params = None\n API_KEY = None\n\n\n\ndef open_db_connection():\n \"\"\"\n Opens connection to psql db\n\n :return:\n connection object\n \"\"\"\n if db_params is None:\n print(\"Cannot establish connection to database. Please provide db_params in secrets.yaml file.\")\n exit(1)\n\n conn = pg2.connect(\n host=db_params['host'],\n port=db_params['port'],\n dbname=db_params['dbname'],\n user=db_params['user'],\n password=db_params['password']\n )\n print(f\"Connection opened to database {db_params['dbname']}\")\n return conn\n\n\nclass geoLevel():\n \"\"\"\n Geographical Granularity object that store the\n name of the geographical level to obtain the data from\n and its position in the hierarchy\n\n :param:\n geo : Name of the geographical level\n must be one of ['us', 'state', 'county', 'tract', 'block group', 'block']]\n \"\"\"\n def __init__(self, geo):\n self.name = geo\n if geo == 'us' or geo == 'state':\n self.level = 0\n elif geo in ['county', \"state legislative district (upper chamber)\", \"state legislative district (lower chamber)\"]:\n self.level = 1\n elif geo == 'tract':\n self.level = 2\n elif geo == 'block group' or geo == 'block':\n self.level = 3\n else:\n raise AssertionError(\"geo must be one of ['us', 'state', 'county', 'tract', 'block group', 'block']]\")\n\n\ndef get_variables(filename):\n \"\"\"\n Get variables from a file, storing one variable 3-tuple per line.\n The variable 3-tuple is of the form as returned by censusdata.search()\n\n :param:\n filename (string) : name of the file holding the variables\n\n :return:\n a list of variable id names\n \"\"\"\n with open(filename, 'r') as f:\n all_tuples = f.readlines()\n\n def oper(tuple, i):\n tuple = literal_eval(tuple)\n return tuple[i]\n\n vars = [oper(tuple, 0) for tuple in all_tuples]\n headers = [oper(tuple, 1) for tuple in all_tuples]\n return vars, headers\n\n\ndef fix_index(data):\n \"\"\"\n Improve the index from one complex string entry, to multiple simpler int entries\n\n :param:\n data (pandas.DataFrame): Index of the format censusdata.censusdata.censusgeo\n\n :return:\n a pandas.DataFrame object with added entries for stateID, [countyID], [tractID], [BlockGroupID]\n \"\"\"\n def fix_names(name):\n name = re.sub(\" \", \"_\", name)\n name = re.sub(r'(\\W)', \"\", name)\n return name\n \n index_list = data.index.tolist()\n # print(index_list)\n new_entries = [entry.params() for entry in index_list]\n # print(new_entries)\n \n headers = []\n index_size = 
len(new_entries[0])\n for i in range(index_size):\n headers.append(new_entries[0][i][0])\n new_entries = [[e[1] for e in entry] for entry in new_entries]\n new_entries = np.array(new_entries)\n\n headers = [fix_names(h) for h in headers]\n\n data = data.reset_index(drop=True)\n for i in range(index_size):\n data.insert(i, headers[i].strip(), new_entries[:, i])\n\n return data\n\n\ndef download_data(vars):\n \"\"\"\n function to download data from the ACS website\n\n :param:\n geo_level (geoLevel object): which geophical granularity to obtain for the data\n vars (string): a file name that holds 3-tuples of the variables,\n (in the format returned by censusdata.search()),\n where first is the variable id, and second is the variable header.\n :return:\n a pandas.DataFrame object\n \"\"\"\n gl = geoLevel(geo_level_name)\n print(f\"Getting {gl.name} level geographies...\")\n geographies = get_censusgeos(gl)\n vars, headers = get_variables(vars)\n data = []\n print(\"Downloading selected variables for these geographies...\")\n for geo in tqdm(geographies):\n local_data = censusdata.download(data_source, year, geo, vars, tabletype=tabletype, key=API_KEY)\n data.append(local_data)\n data = pd.concat(data)\n data.columns = headers\n data = fix_index(data)\n return data\n\n\ndef _get_geo(geotype, names=None, higher_list=None):\n \"\"\"\n Helper function to obtain geographies from one level to the next\n\n :param:\n geotype (string) : name of the geography (e.g. 'state')\n names (list of string) : names of the specific geographical location\n you want to pull the data for,\n or None if you want it for all\n higher_list (list of tuples of string):\n the list of the higher level hierarchy of geo locations\n reaching upto that level\n (e.g. if 'geo' is 'tract', then this could be\n [('state', 'Pennsylvania'), ('county', 'York County')] )\n\n :return:\n list of censusgeo objects\n \"\"\"\n if higher_list is None:\n higher_list = []\n geo = [censusdata.censusgeo(higher_list + [(geotype, '*')])]\n if names is not None:\n all_geos = censusdata.geographies(geo[0], data_source, year, key=API_KEY)\n geo = []\n for name in names:\n geo.append(all_geos[name])\n\n return geo\n\n\ndef get_censusgeos(geo_level):\n \"\"\"\n Gets the censusgeo objects for the specified geography,\n and specific names (if specified)\n\n :param:\n geo_level (geoLevel object): geo level at which granularity the data\n needs to be obtained at\n :return:\n a list of censusgeo objects\n \"\"\"\n\n # to obtain natiowide data\n if geo_level.name == 'us':\n final_geo = _get_geo('us')\n\n # to obtain state wise data\n else:\n state_geos = _get_geo('state', state_names)\n final_geo = state_geos\n\n # get the county level or legislative district level geographies\n if geo_level.level >= 1:\n # iterate over the states\n county_geos = []\n for i in range(len(state_names)):\n state_name = state_names[i]\n if county_names is None or county_names[i] is None:\n county_state_names = None\n else:\n county_state_names = [cn+\", \"+state_name for cn in county_names[i]]\n geo = _get_geo(geo_level.name, county_state_names, list(state_geos[i].params()))\n\n # Census API doesn't support using wildcards for 'county' for lower levels of hierarchy\n if geo_level.level > 1 and county_state_names is None:\n all_geos = censusdata.geographies(geo[0], data_source, year, key=API_KEY)\n geo = all_geos.values()\n\n county_geos += geo\n final_geo = county_geos\n\n # the following part could be done in a simpler manner than what is done below, but this implementation\n # 
allows easy extension to the cases where we might need to specify specific tracts and blocks.\n\n # getting all tracts or blocks or block groups\n for level in [2,3]:\n if geo_level.level >= level:\n if level == 2:\n name = 'tract'\n else:\n name = geo_level.name # could be 'block' or 'block group'\n level_geos = []\n for i in range((len(final_geo))):\n geo = _get_geo(name, None, list(final_geo[i].params()))\n level_geos += geo\n final_geo = level_geos\n else:\n break\n\n return final_geo\n\n\n","sub_path":"acs_scripts/gather_data.py","file_name":"gather_data.py","file_ext":"py","file_size_in_byte":7904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"13751658","text":"import numpy as np\nimport pandas as pd\n\nfrom .perceptron import PerceptronModel\n\n\nclass AveragePerceptronModel(PerceptronModel):\n averaged_weights = pd.Series()\n\n def calc_weights_per_epoch(self, X: pd.DataFrame, y: pd.Series):\n X.reset_index(drop=True, inplace=True)\n y.reset_index(drop=True, inplace=True)\n for i, y_i in y.iteritems():\n w = self.weights.values.T\n x_i = X.iloc[i].values\n is_error = y_i * np.dot(w, x_i) <= 0\n if is_error:\n weight_change = self.rate * x_i * y_i\n self.convergence_of_weights = self.convergence_of_weights.append(\n pd.Series(self.compute_norm(\n (self.weights + weight_change).values - self.weights.values)\n )\n ).reset_index(drop=True)\n self.weights = self.weights + weight_change\n else:\n if self.averaged_weights.empty:\n self.averaged_weights = self.weights.copy()\n else:\n self.averaged_weights += self.weights\n\n def test(self, X: pd.DataFrame, y: pd.Series) -> float:\n X['MODEL_BIAS'] = -1\n y_hat = self.evaluate(X)\n s = y.to_numpy() == y_hat\n return np.sum(s) / len(s)\n\n def evaluate(self, X: pd.DataFrame) -> np.ndarray:\n return np.sign(np.dot(X.to_numpy(), self.averaged_weights.to_numpy()))\n","sub_path":"Perceptron/average_perceptron.py","file_name":"average_perceptron.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"47287526","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 9 15:30:58 2019\n\n@author: benjaminforleo\n\"\"\"\n\n# %%\nimport pandas as pd\nimport numpy as np\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport plotly.graph_objs as go\n\nimport os\nos.chdir('/Users/benjaminforleo/spring2019_time_tracking')\n\ndf = pd.read_pickle(\"./data/toggle_time_entries_clean.pkl\")\n\nwith open(\"./text/header.txt\", 'r') as header:\n header = header.read()\n \nwith open(\"./text/p1.txt\", 'r') as p1:\n p1 = p1.read()\n\n# %% pre-processing\n\nhours_per_day = df.groupby(df.index)['duration'].sum()\n\ndates = pd.date_range(hours_per_day.index.min(), hours_per_day.index.max())\n\nhours_per_day = hours_per_day.reindex(dates)\n\n\n# %% app\n\n\napp = dash.Dash()\n\napp.layout = html.Div([\n html.Div([\n html.H1([header], style = {'white-space': 'pre-wrap', 'color':'#f4f4f4'}),\n html.P([p1], style = {'white-space': 'pre-wrap', 'color':'#f4f4f4'}),\n html.Hr(),\n html.Div([\n html.Div([dcc.Slider(id = 'moving_average', min = 1, max = 28, step = 7, value = 1)], style = {'width':'25%', 'float':'left'}),\n dcc.Graph(id = 'time_series', style = {'width':'75%', 'float':'left'})])\n \n ], style = {'width':'75%', 'margin':'auto', 'background-color':'rgb(37, 55, 70)'})\n ], style = 
{'background-color':'#0044bb'})\n\n@app.callback(Output('time_series', 'figure'),\n [Input('moving_average','value')])\ndef time_series_analysis(moving_average):\n \n data = [go.Scatter(x = hours_per_day.index,\n y = hours_per_day.values,\n mode = 'lines')]\n \n return go.Figure(data)\n\n\nif __name__ == '__main__':\n app.run_server()\n\n\n","sub_path":"03. dashboard.py","file_name":"03. dashboard.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"182586245","text":"#!/usr/bin/env python3\n#author:Alnk(李成果)\nimport sys,os,time\nimport json\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) #获取目录\nfrom Atm.core.login import login #登录认证模块\n\ndef read_file():\n \"\"\"读取数据文件操作\"\"\"\n file_path = BASE_DIR + \"/Atm/db/user.txt\"\n with open(file_path,\"r\") as f:\n user_dict = json.load(f)\n return user_dict\n\ndef write_file(user_dict):\n \"\"\"写入数据到文件\"\"\"\n file_path = BASE_DIR + \"/Atm/db/user.txt\"\n with open(file_path, \"w\") as f:\n json.dump(user_dict,f)\n\ndef bill_read():\n \"\"\"账单数据读入\"\"\"\n bill_path = BASE_DIR + \"/Atm/db/billing_record.txt\"\n with open(bill_path, \"r\") as f:\n bill_dict = json.load(f)\n return bill_dict\n\ndef bill_write(user_card,purpose,cash):\n \"\"\"账单数据写入\"\"\"\n bill_path = BASE_DIR + \"/Atm/db/billing_record.txt\"\n bill_dict = bill_read()\n time_key = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n bill_dict[user_card][time_key] = {purpose: cash}\n with open(bill_path, \"w\") as f:\n json.dump(bill_dict,f)\n\ndef user_info(user_card):\n \"\"\"账户信息\"\"\"\n user_dict = read_file()\n user_info = user_dict[user_card][\"available_quota\"]\n print(\"\\n你的可用额度为: %s\"%(user_info))\n\ndef repayment(user_card):\n \"\"\"还款\"\"\"\n user_dict = read_file() #加载数据\n while True:\n demand_for_repayment = user_dict[user_card][\"quota\"] - user_dict[user_card][\"available_quota\"]\n if demand_for_repayment > 0: #判断是否需要还款\n print(\"你需要还款的总额为:%s\\n\" % demand_for_repayment)\n try:\n repayment_money = float(input(\"请输入本次还款金额>>>:\"))\n if repayment_money <= demand_for_repayment: #判断还款金额是否小于等于实际需要还款的金额\n user_dict[user_card][\"available_quota\"] += repayment_money\n write_file(user_dict)\n purpose = \"repayment\" #支付方式\n bill_write(user_card,purpose,repayment_money) #账单数据写入\n print(\"还款成功,可用额度为: \",user_dict[user_card][\"available_quota\"])\n break\n else:\n print(\"超过最大还款金额,请重新输入\\n\")\n except:\n print(\"你输入的不是数字,请重新输入\\n\")\n else:\n print(\"土豪,你不需要还款哟~~~\")\n break\n\ndef transfer(user_card):\n \"\"\"转账\"\"\"\n user_dict = read_file()\n while True:\n transfer_card = input(\"请输入需要转账的卡号>>>:\")\n if transfer_card in user_dict.keys(): #判断被转账的卡号是否存在\n try:\n transfer_amount = float(input(\"请输入转账金额>>>:\"))\n if transfer_amount <= user_dict[user_card][\"available_quota\"]: #判断转账金额是否小于可用额度\n user_dict[user_card][\"available_quota\"] -= transfer_amount #转账人可用额度减少\n user_dict[transfer_card][\"available_quota\"] += transfer_amount #被转账人可用额度增加\n write_file(user_dict)\n print(\"转账成功,可用额度为:\",user_dict[user_card][\"available_quota\"])\n purpose = \"transfer\"\n bill_write(user_card,purpose,transfer_amount) # 账单数据写入\n break\n else:\n print(\"你的可用额度为:%s,不够转账哦\"%user_dict[user_card][\"available_quota\"])\n except:\n print(\"转账金额需要为数字哦\")\n else:\n print(\"你输入的卡号不存在哦\\n\")\n\ndef withdraw(user_card):\n \"\"\"提现\"\"\"\n user_dict = read_file() #加载数据\n while True:\n try:\n withdraw_money = float(input(\"请输入你想取款的金额>>>:\"))\n service_charge = withdraw_money * 
0.05 #手续费\n summary = withdraw_money + service_charge #总扣款额\n if summary <= user_dict[user_card][\"available_quota\"] / 2:\n user_dict[user_card][\"available_quota\"] -= summary\n print(\"你已经取款:%s,手续费:%s,可用额度:%s\" %(withdraw_money,service_charge,user_dict[user_card][\"available_quota\"]))\n write_file(user_dict)\n purpose = \"withdraw\"\n bill_write(user_card, purpose, withdraw_money) # 账单数据写入\n break\n else:\n print(\"你的额度不够哦,可用额度:\",user_dict[user_card][\"available_quota\"])\n except:\n print(\"输入必须为整数哦\")\n\ndef switch(user_card):\n \"\"\"切换账号\"\"\"\n run()\n\ndef bill(user_card):\n \"\"\"账单查询函数\"\"\"\n bill_path = BASE_DIR + \"/Atm/db/billing_record.txt\"\n with open(bill_path,\"r\") as f:\n bill_dict = json.load(f)\n print(\"尊敬的 [%s] 你的账单如下:\" %user_card)\n for i in bill_dict[user_card]:\n print(\"\\t%s %s\"%(i,bill_dict[user_card][i]))\n\ndef logout(user_card):\n \"\"\"退出\"\"\"\n sys.exit(\"你已经退出系统\")\n\ndef payment(user_total_pay):\n \"\"\"付款,用于第三方支付\"\"\"\n user_dict = read_file()\n while True:\n card = input(\"请输入卡号>>>:\")\n password = input(\"请输入密码>>>:\")\n if card in user_dict.keys(): # 判断用户是否存在\n if user_dict[card][\"state\"] == 1: # 判断用户是否冻结\n if user_dict[card][\"password\"] == password: # 判断密码是否正确\n if user_dict[card][\"available_quota\"] >= user_total_pay: # 判断额度是否够用\n user_dict[card][\"available_quota\"] -= user_total_pay\n write_file(user_dict)\n purpose = \"payment\"\n bill_write(card, purpose, user_total_pay) # 账单数据写入\n return True\n else:\n print(\"你的额度不够\")\n else:\n print(\"用户或密码错误\")\n else:\n print(\"该用户已经被冻结了\")\n elif card == \"b\":\n break\n else:\n print(\"用户不存在\")\n\n@login\ndef interactive(user_card,user_pass):\n \"\"\"交互函数\"\"\"\n menu = '''\n ----------尊敬的 %s ,欢迎光临 Alnk Bank ----------\n 1. 账户信息\n 2. 还款\n 3. 转账\n 4. 提现\n 5. 账单\n 6. 切换账号\n 7. 
退出\n ''' %user_card\n menu_dict = {\n\n \"1\":user_info,\n \"2\":repayment,\n \"3\":transfer,\n \"4\":withdraw,\n \"5\":bill,\n \"6\":switch,\n \"7\":logout,\n }\n while True:\n print(menu)\n user_option = input(\"请输入编号>>>:\").strip()\n if user_option in menu_dict:\n menu_dict[user_option](user_card)\n else:\n print(\"你输入的编号有误,请重新输入\")\n\ndef run():\n user_card = input(\"请输入银行卡号>>>:\")\n user_pass = input(\"请输入密码>>>:\")\n #user_card = \"215233\"\n #user_pass = \"abc123\"\n interactive(user_card,user_pass)\n\nif __name__ == \"__main__\":\n run()","sub_path":"day04/02作业/Atm/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"640212153","text":"import datetime\nfrom tracker.models import Squirrel\nfrom django.core.management.base import BaseCommand\nimport csv\n\nclass Command(BaseCommand):\n \n args = '[appname.ModelName]'\n \n def add_arguments(self, parser):\n parser.add_argument('file', type=str)\n\n def handle(self, *args, **kwargs):\n x=kwargs['file']\n from django.apps import apps\n model = apps.get_model('tracker', 'Squirrel')\n field_names = [f.name for f in model._meta.fields]\n with open(x, 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(field_names)\n for instance in model.objects.all():\n writer.writerow([getattr(instance, f) for f in field_names])\n","sub_path":"tracker/management/commands/export_squirrel_data.py","file_name":"export_squirrel_data.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"626739749","text":"def long_repeat(line: str) -> int:\n maxStreak = 1\n currStreak = 1\n for i in range(1,len(line)):\n if line[i-1] == line[i]:\n currStreak += 1\n else:\n if currStreak > maxStreak:\n maxStreak = currStreak\n currStreak = 1\n\n if currStreak > maxStreak:\n maxStreak = currStreak\n\n return maxStreak if line else 0\n\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert long_repeat('sdsffffse') == 4, \"First\"\n assert long_repeat('ddvvrwwwrggg') == 3, \"Second\"\n assert long_repeat('abababaab') == 2, \"Third\"\n assert long_repeat('') == 0, \"Empty\"\n assert long_repeat('aa') == 2, \"2fer\"\n print('\"Run\" is good. 
How is \"Check\"?')\n","sub_path":"python/checkio_questions/long_repeat/longRepeat.py","file_name":"longRepeat.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"369404236","text":"import pandas as pd\nimport os\n\ndata=pd.DataFrame()\nos.chdir('/Users/siaga/Desktop/NFT')\nfiles=os.listdir('/Users/siaga/Desktop/NFT')\nfor excel in files:\n raw=pd.read_excel(excel)\n raw=raw.dropna(axis=1,how='all')\n raw=raw.dropna()\n raw['formula']='C'+raw['C'].astype(int).astype(str)+'-H'+raw['H'].astype(int).astype(str)+'-O'+raw['O'].astype(int).astype(str)+'-N'+raw['N'].astype(int).astype(str)\n raw['m/z']=raw['m/z'].astype(str) + ',' + raw['formula']\n excel=excel.replace('.xlsx','')\n excel=excel.replace('-','_')\n for column in raw:\n if column != 'm/z' and column != 'intensity':\n del raw[column]\n raw['normalized']=raw['intensity']/raw['intensity'].sum()\n del raw['intensity']\n raw=raw.rename(columns={'m/z':'mass%s'%excel,'normalized':excel,'formula':'formula%s'%excel})\n data=pd.concat([data,raw],axis=1,sort=False)\ndata.to_excel('/Users/siaga/Desktop/processedData.xlsx')","sub_path":"PCA Tools/MergeExcels.py","file_name":"MergeExcels.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"645816630","text":"#!/usr/bin/env python\n\n################################################################################\n## {Description}: Validate the QR/Bar Code (for USB type camera)\n################################################################################\n## Author: Khairul Izwan Bin Kamsani\n## Version: {1}.{0}.{0}\n## Email: {wansnap@gmail.com}\n################################################################################\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport sys\nimport rospy\n\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Int32\nfrom self_collect_machine.msg import boxStatus\n\nimport csv\nimport datetime\nimport cv2\n\nclass BoxIDValidate_node:\n\tdef __init__(self):\n\t\t# Initializing your ROS Node\n\t\trospy.init_node('BoxIDValidate_node', anonymous=True)\n\n\t\trospy.on_shutdown(self.shutdown)\n\n\t\t# Subscribe to the sensorState_1 topic\n#\t\tself.sensorState1_sub = rospy.Subscriber(\"/sensorState_1\", Int32)\n\t\tself.sensorState1_sub = rospy.Subscriber(\"/switchState_1\", Int32)\n\n\t\t# Subscribe to the sensorState_2 topic\n#\t\tself.sensorState2_sub = rospy.Subscriber(\"/sensorState_2\", Int32)\n\t\tself.sensorState1_sub = rospy.Subscriber(\"/switchState_2\", Int32)\n\n\t\t# Subscribe to the sensorState_3 topic\n#\t\tself.sensorState3_sub = rospy.Subscriber(\"/sensorState_3\", Int32)\n\t\tself.sensorState3_sub = rospy.Subscriber(\"/switchState_3\", Int32)\n\n\t\t# TODO: may add more\n\t\t# Subscribe to the sensorState_N topic\n\t\t#self.sensorStateN_sub = rospy.Subscriber(\"/sensorState_N\", Int32)\n\n\t\t# Publish to the scanned_barcode topic\n\t\tself.boxStatus_pub = rospy.Publisher(\"/box_available\", boxStatus, queue_size=10)\n\n\t\tself.getBoxState()\n\n\tdef getSensorState1(self):\n\t\t# Wait for the topic\n\t\tself.state1 = rospy.wait_for_message(\"/sensorState_1\", Int32)\n\n\tdef getSensorState2(self):\n\t\t# Wait for the topic\n\t\tself.state2 = rospy.wait_for_message(\"/sensorState_2\", Int32)\n\n\tdef getSensorState3(self):\n\t\t# Wait for the topic\n\t\tself.state3 = rospy.wait_for_message(\"/sensorState_3\", 
Int32)\n\n\t# TODO: may add more\n\t#def getSensorStateN(self):\n\t\t# Wait for the topic\n\t\t#self.stateN = rospy.wait_for_message(\"/sensorState_N\", String)\n\n\t# Shutdown\n\tdef shutdown(self):\n\t\ttry:\n\t\t\trospy.loginfo(\"[INFO] BoxIDValidate_node [OFFLINE]...\")\n\n\t\tfinally:\n\t\t\tpass\n\n\tdef getBoxState(self):\n\t\t# Initiate the topic\n\t\tself.boxState = boxStatus()\n\n\t\twhile not rospy.is_shutdown():\n\t\t\t# Get the scanned data\n\t\t\tself.getSensorState1()\n\t\t\tself.getSensorState2()\n\t\t\tself.getSensorState3()\n\t\t\t# TODO: May add more here\n\n\t\t\t# Publish all three collected states\n\t\t\tself.boxState.data = [self.state1.data, self.state2.data, self.state3.data]\n\t\t\tself.boxStatus_pub.publish(self.boxState)\n\ndef main(args):\n\tvn = BoxIDValidate_node()\n\n\ttry:\n\t\trospy.spin()\n\texcept KeyboardInterrupt:\n\t\trospy.loginfo(\"[INFO] BoxIDValidate_node [OFFLINE]...\")\n\n\tcv2.destroyAllWindows()\n\nif __name__ == '__main__':\n\trospy.loginfo(\"[INFO] BoxIDValidate_node [ONLINE]...\")\n\tmain(sys.argv)\n","sub_path":"script/box_validity_rev1.py","file_name":"box_validity_rev1.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"8523489","text":"import numpy as np\nimport cv2 \nfrom array import array\nfrom PIL import Image\n\n\nclass imageProcess():\n\n\t#ImgGlasses=\"g2.png\"\n\t#ImgPhoto=\"5.jpg\" #user\n\t#-------------------------\n\t#ImgChanged=\"gozluk.png\"\n\t#ImgNew = \"new.png\"\n\n\n\tpwdHar =\"/root/opencv/data/haarcascades/\" #constant values, to be changed\n\tpwdImg=\"/root/\" # constant values, to be changed\n\n\tdef __init__(self, ImgGlasses, ImgPhoto, ImgChanged, ImgNew):\n\t\tself.ImgGlasses = ImgGlasses\n\t\tself.ImgPhoto = ImgPhoto\t\n\t\tself.ImgChanged = ImgChanged\n\t\tself.ImgNew = ImgNew\n\n\tdef resize_const(self, name, size):\n\t\tim = Image.open(name)\n\t\twidth, height = im.size\n\t\toutput = im.resize(size, Image.ANTIALIAS)\n\t\toutput.save(self.pwdImg + self.ImgChanged) #change the path\n\t\n\n\tdef watermark(self, name, glasses, coordinate):#user\n\t\timage = Image.open(name)\n\t\tlogo = Image.open(glasses)#glasses\n\t\timage.paste(logo,(coordinate[0],coordinate[1]),logo)\n\t\timage.save(self.pwdImg + self.ImgNew,'PNG')\n\n\tdef resize_auto(self, name, size):\n\t\tim = Image.open(name)\n\t\twpercent = (size/float(im.size[0]))\n\t\thsize = int((float(im.size[1])*float(wpercent)))\n\t\toutput = im.resize((size,hsize), Image.ANTIALIAS)\n\t\toutput.save(self.pwdImg + self.ImgChanged) #change the path\n\n\tdef start(self):\n\t\tface_cascade = cv2.CascadeClassifier(self.pwdHar+'haarcascade_frontalface_default.xml')\n\t\teye_cascade = cv2.CascadeClassifier(self.pwdHar+ 'haarcascade_eye.xml') \n\t\timg = cv2.imread(self.pwdImg + self.ImgPhoto)\n\n\t\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\teyeXY = ()\n\t\tfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\t\tfor (x,y,w,h) in faces: \n\t\t cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n\t\troi_gray = gray[y:y+h, x:x+w]\n\t\troi_color = img[y:y+h, x:x+w] \n\t\teyes = eye_cascade.detectMultiScale(roi_gray)\n\n\t\tfor (ex,ey,ew,eh) in eyes: \n\t\t\teyeXY = eyeXY +((ex+ex+ew)/2+5,(ey+ey+eh)/2)\n\t\tself.resize_auto(self.pwdImg + self.ImgGlasses,w)#original glasses\n\t\tself.watermark(self.pwdImg + self.ImgPhoto, self.pwdImg + self.ImgChanged,(x+4,y+ey))\n\n\nif __name__ == \"__main__\":\n\tobj = imageProcess('g2.png','velet.jpg','gozluk.png','ayberk.png')\n\tobj.start()\n","sub_path":"Image 
Processing/projectdocs/imageProcessold.py","file_name":"imageProcessold.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"88671544","text":"\"\"\"\n Lesson 08 main module.\n\n\"\"\"\n\nimport csv\nimport logging\nfrom functools import partial\nimport os\n\n\n# Setup logging\nCONSOLE_LOG_FORMAT = \"%(filename)s:%(lineno)-4d %(message)s\"\nCONSOLE_FORMATTER = logging.Formatter(CONSOLE_LOG_FORMAT)\nCONSOLE_HANDLER = logging.StreamHandler()\nCONSOLE_HANDLER.setLevel(logging.WARNING)\nCONSOLE_HANDLER.setFormatter(CONSOLE_FORMATTER)\n\nLOGGER = logging.getLogger()\nLOGGER.setLevel(logging.DEBUG)\nLOGGER.addHandler(CONSOLE_HANDLER)\n\n\ndef add_furniture(invoice_file, customer_name, item_code, item_description, item_monthly_price):\n \"\"\"\n Add the furniture items rented to customers to the invoice_file .csv in the following format\n customer_name,item_code,item_description,item_monthly_price\n Will create invoice_file if it doesn't exist or append a new line to it if it does.\n\n :param invoice_file: .csv file\n :param customer_name: string\n :param item_code: string\n :param item_description: string\n :param item_monthly_price: int\n :return: None\n \"\"\"\n field_names = ['customer_name', 'item_code', 'item_description', 'item_monthly_price']\n # Test if file exists\n if not os.path.exists(invoice_file):\n with open(invoice_file, 'w', newline='') as file:\n writer = csv.DictWriter(file, fieldnames=field_names)\n writer.writeheader()\n\n with open(invoice_file, 'a', newline='') as file:\n logging.debug('%s found, appending data.', file.name)\n writer = csv.DictWriter(file, fieldnames=field_names)\n writer.writerow({'customer_name': customer_name,\n 'item_code': item_code,\n 'item_description': item_description,\n 'item_monthly_price': item_monthly_price})\n\n\ndef single_customer(customer_name, invoice_file):\n \"\"\"\n Return a function that takes rental items and adds them to the invoice_file. 
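A minimal standalone sketch of the proportional-resize idea used by resize_auto above; the function name and paths are hypothetical. Image.ANTIALIAS mirrors the original code, though recent Pillow releases spell this filter Image.LANCZOS.

from PIL import Image

def resize_to_width(src_path, width, dst_path):
    # Scale the height by the same factor as the width, as resize_auto does.
    im = Image.open(src_path)
    scale = width / float(im.size[0])
    height = int(im.size[1] * scale)
    im.resize((width, height), Image.ANTIALIAS).save(dst_path)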
Return function\n    will leverage the add_furniture() func using functools.partial\n    :param customer_name: string\n    :param invoice_file: string\n    :return: f(rental_items)\n    \"\"\"\n    def single_customer_bulk(rental_items):\n        # Use functools.partial\n        partial_add = partial(add_furniture, invoice_file=invoice_file, customer_name=customer_name)\n        # Read/write bulk records\n        with open(rental_items, 'r') as file:\n            reader = csv.reader(file)\n            for row in reader:\n                partial_add(item_code=row[1], item_description=row[2], item_monthly_price=row[3])\n\n    return single_customer_bulk\n","sub_path":"students/franjaku/lesson08/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"343522024","text":"# coding:utf-8\n# Gomoku (five-in-a-row) program\n# Reinforcement learning training pipeline\n\n\nimport random\nimport numpy as np\nimport tqdm\nfrom collections import defaultdict, deque\nfrom mcts import MCTSPlayer as MCTS_Pure\nfrom mcts_alphaZero import AlphaZeroMctsPlayer as MCTSPlayer\nfrom policy_value_net import PolicyValueNet\nfrom tools import *\nimport run\nimport matplotlib.pyplot as plt\n\n\nclass TrainPipeline:\n    def __init__(self, init_model=None):\n        # Board data\n        self.board_width = 8\n        self.board_height = 8\n        # self.n_in_row = 5\n        self.board = chessboard(row=self.board_width, col=self.board_height)\n        # Training parameters\n        self.learn_rate = 2e-3\n        self.lr_multiplier = 1.0\n        self.temp = 1.0\n        self.n_playout = 400  # number of MCTS playouts per move\n        self.c_puct = 5\n        self.buffer_size = 10000000\n        self.batch_size = 512  # samples per batch\n        self.data_buffer = deque(maxlen=self.buffer_size)\n        self.play_batch_size = 1\n        self.epochs = 5  # training iterations per update\n        self.kl_targ = 0.02\n        self.check_freq = 2\n        # Number of self-play games\n        self.game_batch_num = 1000\n        self.best_win_ratio = 0.0\n        # Pure Monte Carlo tree search, used as the baseline\n        self.pure_mcts_playout_num = 400\n        # Case with a pre-trained model\n        if init_model:\n            self.policy_value_net = PolicyValueNet(self.board_width, self.board_height, model_file=init_model)\n        else:\n            # Train from scratch\n            self.policy_value_net = PolicyValueNet(self.board_width, self.board_height)\n        self.mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn, c_puct=self.c_puct, n_playout=self.n_playout, is_selfplay=1)\n    \n    # Augment the training data\n    def get_equi_data(self, play_data):\n        # Build data via rotation and flipping\n        # play_data:[(state, mcts_prob, winner_z), ..., ...]\n        extend_data = []\n        for state, mcts_porb, winner in play_data:\n            for i in [1, 2, 3, 4]:\n                # Rotate clockwise\n                equi_state = np.array([np.rot90(s, i) for s in state])\n                equi_mcts_prob = np.rot90(np.flipud(\nmcts_porb.reshape(self.board_height, self.board_width)), i)\n                extend_data.append((equi_state, np.flipud(equi_mcts_prob).flatten(), winner))\n                # Flip vertically\n                equi_state = np.array([np.fliplr(s) for s in equi_state])\n                equi_mcts_prob = np.fliplr(equi_mcts_prob)\n                extend_data.append((equi_state, np.flipud(equi_mcts_prob).flatten(), winner))\n        return extend_data\n    \n    # Play one round of self-play\n    def start_self_play(self, player, is_shown=0, temp=1e-3):\n        self.board.reset()\n        p1, p2 = self.board.players\n        states, mcts_probs, current_players = [], [], []\n        # For testing\n        # t = 0\n        while True:\n            # t += 1\n            # print(t)\n            move, move_probs = player.get_action(self.board, temp = temp, return_prob=1)\n            # print(\"debug\", move_probs)\n            # store the data\n            states.append(self.board.current_state())\n            mcts_probs.append(move_probs)\n            current_players.append(self.board.current_player)\n            # perform a move\n            self.board.do_move(move)\n            if is_shown:\n                display(self.board)\n            end, winner = self.board.game_end()\n            # print(t, end, winner, self.board.count)\n            if end:\n                # 
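A hedged usage sketch for single_customer above: the customer name and both CSV paths are invented, and the input file is assumed to carry the item fields in columns 1-3, matching the row[1]..row[3] indexing in single_customer_bulk.

# 'test_items.csv' is a hypothetical file of rows shaped like:
# name,item_code,item_description,item_monthly_price
create_invoice = single_customer("Susan Wong", "rented_items.csv")
create_invoice("test_items.csv")  # appends one invoice row per input row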
winner from the perspective of the current player of each state\n                winners_z = np.zeros(len(current_players))\n                if winner != -1:\n                    winners_z[np.array(current_players) == winner] = 1.0\n                    winners_z[np.array(current_players) != winner] = -1.0\n                # reset MCTS root node\n                player.reset_player()\n                if is_shown:\n                    if winner != -1:\n                        print(\"Game end. Winner is player:\", winner)\n                    else:\n                        print(\"Game end. Tie\")\n                return winner, zip(states, mcts_probs, winners_z)\n    \n    # Collect self-play training data\n    def collect_selfplay_data(self, n_games=1):\n        for i in range(n_games):\n            # print(\"debug\", i)\n            winner, play_data = self.start_self_play(self.mcts_player, temp=self.temp, is_shown = False)\n            play_data = list(play_data)[:]\n            self.episode_len = len(play_data)\n            # augment the data\n            play_data = self.get_equi_data(play_data)\n            self.data_buffer.extend(play_data)\n    \n    # Update the policy-value network\n    def policy_update(self):\n        mini_batch = random.sample(self.data_buffer, self.batch_size)\n        state_batch = [data[0] for data in mini_batch]\n        mcts_probs_batch = [data[1] for data in mini_batch]\n        winner_batch = [data[2] for data in mini_batch]\n        old_probs, old_v = self.policy_value_net.policy_value(state_batch)\n        for i in range(self.epochs):\n            loss, entropy = self.policy_value_net.train_step(state_batch, mcts_probs_batch, winner_batch, self.learn_rate*self.lr_multiplier)\n            new_probs, new_v = self.policy_value_net.policy_value(state_batch)\n            kl = np.mean(np.sum(old_probs * (np.log(old_probs + 1e-10) - np.log(new_probs + 1e-10)), axis=1))\n            if kl > self.kl_targ * 4:  # early stopping\n                break\n        # Adjust the learning rate\n        if kl > self.kl_targ * 2 and self.lr_multiplier > 0.1:\n            self.lr_multiplier /= 1.5\n        elif kl < self.kl_targ / 2 and self.lr_multiplier < 10:\n            self.lr_multiplier *= 1.5\n        explained_var_old = (1 - np.var(np.array(winner_batch) - old_v.flatten()) / np.var(np.array(winner_batch)))\n        explained_var_new = (1 - np.var(np.array(winner_batch) - new_v.flatten()) / np.var(np.array(winner_batch)))\n        print((\"kl:{:.5f},\"\n               \"lr_multiplier:{:.3f},\"\n               \"loss:{},\"\n               \"entropy:{},\"\n               \"explained_var_old:{:.3f},\"\n               \"explained_var_new:{:.3f}\"\n               ).format(kl,\n                        self.lr_multiplier,\n                        loss,\n                        entropy,\n                        explained_var_old,\n                        explained_var_new))\n        return loss, entropy\n    \n    # Play one game\n    def start_play(self, player1, player2, start_player=1, is_shown=1):\n        if start_player not in (1, 2):\n            raise Exception('start_player should be either 1 (player1 first) ''or 2 (player2 first)')\n        self.board.reset(start_player)\n        p1, p2 = self.board.players\n        player1.set_player_ind(p1)\n        player2.set_player_ind(p2)\n        players = {p1: player1, p2: player2}\n        if is_shown:\n            display(self.board)\n        while True:\n            current_player = self.board.get_current_player()\n            # print(current_player, players)\n            player_in_turn = players[current_player]\n            move = player_in_turn.get_action(self.board)\n            self.board.do_move(move)\n            if is_shown:\n                display(self.board)\n            end, winner = self.board.game_end()\n            if end:\n                if is_shown:\n                    if winner != -1:\n                        print(\"Game end. Winner is\", players[winner])\n                    else:\n                        print(\"Game end. 
Tie\")\n return winner\n \n # 策略评估,用纯蒙特卡罗树搜索来做基准\n def policy_evaluate(self, n_games=10):\n current_mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn, c_puct=self.c_puct, n_playout=self.n_playout)\n pure_mcts_player = MCTS_Pure(c_puct=5,n_playout=self.pure_mcts_playout_num)\n win_cnt = defaultdict(int)\n for i in range(n_games):\n winner = self.start_play(current_mcts_player, pure_mcts_player, start_player=i % 2 + 1, is_shown=0)\n win_cnt[winner] += 1\n win_ratio = 1.0*(win_cnt[1] + 0.5*win_cnt[-1]) / n_games\n print(\"num_playouts:{}, win: {}, lose: {}, tie:{}\".format(\n self.pure_mcts_playout_num,\n win_cnt[1], win_cnt[2], win_cnt[-1]))\n return win_ratio\n \n # 运行训练\n @run.change_dir\n @run.timethis\n def run(self):\n try:\n losses = []\n for i in tqdm.tqdm(range(self.game_batch_num)):\n self.collect_selfplay_data(self.play_batch_size)\n print(\"batch i:{}, episode_len:{}\".format(i+1, self.episode_len))\n # 测试用的\n # self.policy_value_net.save_model('./output/best_policy.model')\n if len(self.data_buffer) > self.batch_size:\n loss, entropy = self.policy_update()\n losses.append(loss)\n # print(i, loss)\n # 检查当前模型表现并保存模型\n if (i+1) % self.check_freq == 0:\n print(\"当前自训练次数: {}\".format(i+1))\n win_ratio = self.policy_evaluate()\n self.policy_value_net.save_model('./output/current_policy.model')\n if win_ratio > self.best_win_ratio:\n print(\"新的最佳策略!!!!!!!!\")\n self.best_win_ratio = win_ratio\n # update the best_policy\n self.policy_value_net.save_model('./output/best_policy.model')\n if (self.best_win_ratio == 1.0 and self.pure_mcts_playout_num < 5000):\n self.pure_mcts_playout_num += 1000\n self.best_win_ratio = 0.0\n plt.figure()\n plt.plot(losses)\n plt.savefig(\"./output/loss.png\")\n except KeyboardInterrupt:\n print('\\n\\rquit')\n\n\nif __name__ == \"__main__\":\n training_pipeline = TrainPipeline()\n training_pipeline.run()\n ","sub_path":"RLtrain.py","file_name":"RLtrain.py","file_ext":"py","file_size_in_byte":10055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"501719267","text":"\"\"\"\nfind.py - Willie Spelling correction module\nCopyright 2011, Michael Yanovich, yanovich.net\nCopyright 2013, Edward Powell, embolalia.net\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dftba.net\n\nContributions from: Matt Meinwald and Morgan Goose\nThis module will fix spelling errors if someone corrects them\nusing the sed notation (s///) commonly found in vi/vim.\n\"\"\"\n\nimport re\n\ndef setup(willie):\n willie.memory['find_lines'] = dict()\n \ndef collectlines(willie, trigger):\n \"\"\"Create a temporary log of what people say\"\"\"\n \n # Don't log things in PM\n if not trigger.sender.startswith('#'):\n return\n\n # Add a log for the channel and nick, if there isn't already one\n if trigger.sender not in willie.memory['find_lines']:\n willie.memory['find_lines'][trigger.sender] = dict()\n if trigger.nick not in willie.memory['find_lines'][trigger.sender]:\n willie.memory['find_lines'][trigger.sender][trigger.nick] = list()\n\n # Create a temporary list of the user's lines in a channel\n templist = willie.memory['find_lines'][trigger.sender][trigger.nick]\n line = trigger.group()\n if line.startswith(\"s/\"): # Don't remember substitutions\n return\n elif line.startswith(\"\\x01ACTION\"): # For /me messages\n line = line[:-1]\n templist.append(line)\n else:\n templist.append(line)\n\n del templist[:-10] # Keep the log to 10 lines per person\n \n willie.memory['find_lines'][trigger.sender][trigger.nick] 
= templist\ncollectlines.rule = r'.*'\ncollectlines.priority = 'low'\n\n\ndef findandreplace(willie, trigger):\n # Don't bother in PM\n if not trigger.sender.startswith('#'): return\n\n rnick = trigger.group(1) or trigger.nick # Correcting other person vs self.\n\n search_dict = willie.memory['find_lines']\n # only do something if there is conversation to work with\n if trigger.sender not in search_dict:\n return\n if rnick not in search_dict[trigger.sender]:\n return\n\n sep = trigger.group(2)\n rest = trigger.group(3).split(sep)\n me = False # /me command\n flags = ''\n \n # Account for if extra flags are given (just g and i for now), or a search\n # and substitution pattern aren't given.\n if len(rest) < 2:\n return\n elif len(rest) > 2:\n # Word characters immediately after the second separator\n # are considered flags (only g and i now have meaning)\n flags = re.match(r'\\w*',rest[2], re.U).group(0)\n \n # If g flag is given, replace all. Otherwise, replace once.\n if 'g' in flags:\n count = -1\n else:\n count = 1\n \n # repl is a lambda function which performs the substitution. i flag turns\n # off case sensitivity. re.U turns on unicode replacement.\n if 'i' in flags:\n regex = re.compile(re.escape(rest[0]),re.U|re.I)\n repl = lambda s: re.sub(regex,rest[1],s,count == 1)\n else:\n repl = lambda s: s.replace(rest[0],rest[1],count)\n\n # Look back through the user's lines in the channel until you find a line\n # where the replacement works\n for line in reversed(search_dict[trigger.sender][rnick]):\n if line.startswith(\"\\x01ACTION\"):\n me = True # /me command\n line = line[8:]\n else:\n me = False\n new_phrase = repl(line)\n if new_phrase != line: # we are done\n break\n\n if not new_phrase or new_phrase == line:\n return # Didn't find anything\n\n # Save the new \"edited\" message.\n action = (me and '\\x01ACTION ') or '' # If /me message, prepend \\x01ACTION\n templist = search_dict[trigger.sender][rnick]\n templist.append(action + new_phrase)\n search_dict[trigger.sender][rnick] = templist\n willie.memory['find_lines'] = search_dict\n\n # output\n if not me:\n new_phrase = '\\x02meant\\x02 to say: ' + new_phrase\n if trigger.group(1):\n phrase = '%s thinks %s %s' % (trigger.nick, rnick, new_phrase)\n else:\n phrase = '%s %s' % (trigger.nick, new_phrase)\n\n willie.say(phrase)\n\n# Matches optional whitespace + 's' + optional whitespace + separator character\nfindandreplace.rule = r'(?u)(?:([^\\s:]+)[\\s:])?\\s*s\\s*([^\\s\\w])(.*)' # May work for both this and \"meant\" (requires trigger.group(i+1))\nfindandreplace.priority = 'high'\n\n\n","sub_path":"willie/modules/find.py","file_name":"find.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"231750418","text":"#!/bin/python3\n\nimport os\nimport wget\nimport concurrent.futures\nimport functools\nimport time\nfrom tqdm import tqdm\n\n\ndef progress_bar(expected_time, increments=10):\n\n def _progress_bar(func):\n\n def timed_progress_bar(future, expected_time, increments=10):\n \"\"\"\n Display progress bar for expected_time seconds.\n Complete early if future completes.\n Wait for future if it doesn't complete in expected_time.\n \"\"\"\n interval = expected_time / increments\n with tqdm(total=increments) as pbar:\n for i in range(increments - 1):\n if future.done():\n # finish the progress bar\n # not sure if there's a cleaner way to do this?\n pbar.update(increments - i)\n return\n else:\n time.sleep(interval)\n pbar.update()\n 
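A small demonstration of the findandreplace trigger rule above, using the regex exactly as written; the sample message 's/teh/the/gi' is invented.

import re

rule = re.compile(r'(?u)(?:([^\s:]+)[\s:])?\s*s\s*([^\s\w])(.*)')
m = rule.match('s/teh/the/gi')
assert m.group(1) is None          # no target nick before the 's'
assert m.group(2) == '/'           # the separator character
assert m.group(3) == 'teh/the/gi'  # pattern, replacement, and flags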
# if the future still hasn't completed, wait for it.\n future.result()\n pbar.update()\n\n @functools.wraps(func)\n def _func(*args, **kwargs):\n with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:\n future = pool.submit(func, *args, **kwargs)\n timed_progress_bar(future, expected_time, increments)\n\n return future.result()\n\n return _func\n\n return _progress_bar\n\n\nif not (os.path.isfile('gdrive.sh')):\n wget.download('https://raw.githubusercontent.com/GitHub30/gdrive.sh/master/gdrive.sh')\n\n\n@progress_bar(expected_time=11)\ndef binodfunc(fileid):\n os.system('curl gdrive.sh | bash -s {}'.format(fileid))\n\n","sub_path":"binodcli/binodfile.py","file_name":"binodfile.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"339949764","text":"#!/usr/bin/env python2.7\n# Run ORAC postprocessor from the community code\n# 18 Feb 2016, AP: Initial version\n# 24 Jun 2016, AP: P2.7 rewrite\n# 08 Jul 2016, AP: Debugging against more awkward python environments\n\nfrom colours import cprint\n\nimport argparse\nimport orac_utils as ou\n\n\n# Define parser\nparser = argparse.ArgumentParser(\n description='Run the ORAC postprocessor on all files of given phases.')\nou.args_common(parser)\nou.args_postproc(parser)\nargs = parser.parse_args()\n\ntry:\n ou.check_args_postproc(args)\n\n # Run postprocessor\n driver = ou.build_postproc_driver(args)\n ou.call_exe(args, args.orac_dir+'/post_processing/post_process_level2',\n driver)\nexcept ou.OracError as err:\n cprint('ERROR) ' + err.message, ou.colouring['error'])\nexcept KeyboardInterrupt:\n cprint('Execution halted by user.', ou.colouring['error'])\n","sub_path":"src/orac/old_py_scripts/orac_postproc.py","file_name":"orac_postproc.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"219959904","text":"# Caesar cipher project\n# By Martijn Vissers\n\n# list of characters\nalphabet = \"a b c d e f g h i j k l m n o p q r s t u v w x y z\".split(\" \")\ncapital_alphabet = \"A B C D E F G H I J K L M N O P Q R S T U V W X Y Z\".split(\" \")\n\n\n# Function to print the menu\ndef print_menu():\n print(\"-----------------------------------------\")\n print(\"Caesar Cipher\")\n print()\n print(\"Please choose a task from the menu below.\")\n print(\"-----------------------------------------\")\n print(\"e - Encryption\")\n print(\"d - Decryption\")\n print(\"b - Brute Force\")\n print()\n print(\"q - Quit\")\n print(\"-----------------------------------------\")\n\n\n# Function to handle incorrect inputs\ndef invalid_input():\n print(\"Invalid input. 
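A hedged usage sketch for the progress_bar decorator above; slow_job and its 3-second budget are invented. The tqdm bar ticks against expected_time and completes early once the wrapped call's future resolves.

import time

@progress_bar(expected_time=3)
def slow_job():
    time.sleep(2)  # stands in for a long-running download
    return "done"

print(slow_job())  # prints "done" after the bar finishes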
Please enter a valid input\")\n print()\n input(\"Press enter to continue ...\")\n\n\n# Encryption function\ndef encrypt(message, key):\n encrypted_message = \"\"\n # Loop over all characters in the message\n for i in message:\n # Reset the index\n index = key\n\n # Check if character is in alphabet\n if i.lower() in alphabet:\n # if to get the proper index (key) by checking if its more than 26\n if alphabet.index(i.lower()) + key > alphabet.__len__()-1:\n index = (alphabet.index(i.lower()) + key) - alphabet.__len__()\n else:\n index = alphabet.index(i.lower())+key\n # Add the proper letter (and proper case) to the cypher\n if i in alphabet:\n encrypted_message += alphabet[index]\n elif i in capital_alphabet:\n encrypted_message += capital_alphabet[index]\n # Else, add the letter simply to the message\n else:\n encrypted_message += i\n\n # Return the encrypted message\n return encrypted_message\n\n\n# Decrypt function\ndef decrypt(message, key):\n decrypted_message = \"\"\n # Loop over all characters in the message\n for i in message:\n # Reset the index\n index = key\n\n # Check if character is in alphabet\n if i.lower() in alphabet:\n # if to get the proper index (key) by checking if its more than 26\n if alphabet.index(i.lower()) - key < 0:\n index = (alphabet.index(i.lower()) - key) + alphabet.__len__()\n else:\n index = alphabet.index(i.lower()) - key\n # Add the proper letter (and proper case) to the cypher\n if i in alphabet:\n decrypted_message += alphabet[index]\n elif i in capital_alphabet:\n decrypted_message += capital_alphabet[index]\n # Else, add the letter simply to the message\n else:\n decrypted_message += i\n\n # Return the encrypted message\n return decrypted_message\n\n\n# Encrypt menu function\ndef encrypt_menu():\n # Get some inputs\n print(\"Enter your message\")\n message = input()\n print(\"\\nEnter the key (1-26)\")\n key = int(input())\n\n # Check if input is correct\n if not 1 <= key <= 26:\n invalid_input()\n else:\n # Calculate the cipher\n encrypted_message = encrypt(message, key)\n\n # Print the message\n print(\"\\nThe encrypted message is:\")\n print(encrypted_message)\n input(\"\\nPress enter to continue ...\")\n\n\n# Decrypt menu function\ndef decrypt_menu():\n # Get some inputs\n print(\"Enter your message\")\n message = input()\n print(\"\\nEnter the key (1-26)\")\n key = int(input())\n \n # Check if input is correct\n if not 1 <= key <= 26:\n invalid_input()\n else:\n # Calculate the cipher\n decrypted_message = decrypt(message, key)\n\n # Print the message\n print(\"\\nThe decrypted message is:\")\n print(decrypted_message)\n input(\"\\nPress enter to continue ...\")\n\n\n# Brute force menu\ndef bruteforce_menu():\n # Get some input\n print(\"Enter your message\")\n message = input()\n decrypted_message = \"\"\n\n # Calculate the ciphers and add them to a string\n for i in range(1,27):\n decrypted_message += str(i) + \": \"\n decrypted_message += decrypt(message, i)\n decrypted_message += \"\\n\"\n\n # Print the message\n print(\"\\nThe possible decrypted messages are:\")\n print(decrypted_message)\n input(\"Press enter to continue ...\")\n\n\n# Quit variable\nq = False\n\n# Main loop\nwhile not q:\n # Print menu and ask for input\n print_menu()\n task = input()\n # Check the input\n if task == 'q':\n q = True\n elif task == 'e':\n encrypt_menu()\n elif task == 'd':\n decrypt_menu()\n elif task == 'b':\n bruteforce_menu()\n else:\n 
invalid_input()","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"313959574","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom npbrain.core import integrate\nfrom npbrain.core.synapse import *\n\n__all__ = [\n 'NMDA',\n]\n\n\ndef NMDA(pre, post, connection, delay=None, g_max=0.15, E=0, alpha=0.062, beta=3.75,\n cc_Mg=1.2, tau_decay=100., a=0.5, tau_rise=2., name='NMDA'):\n \"\"\"NMDA conductance-based synapse.\n\n .. math::\n\n I_{syn}&=\\\\bar{g}_{syn} s (V-E_{syn})\n\n g(t) &=\\\\bar{g} \\\\cdot (V-E_{syn}) \\\\cdot g_{\\\\infty}\n \\\\cdot \\\\sum_j s_j(t) \\\\quad (3)\n\n g_{\\\\infty}(V,[{Mg}^{2+}]_{o}) & =(1+{e}^{-\\\\alpha V}\n [{Mg}^{2+}]_{o}/\\\\beta)^{-1} \\\\quad (4)\n\n \\\\frac{d s_{j}(t)}{dt} & =-\\\\frac{s_{j}(t)}\n {\\\\tau_{decay}}+a x_{j}(t)(1-s_{j}(t)) \\\\quad (5)\n\n \\\\frac{d x_{j}(t)}{dt} & =-\\\\frac{x_{j}(t)}{\\\\tau_{rise}}+\n \\\\sum_{k} \\\\delta(t-t_{j}^{k}) \\\\quad (6)\n\n where the decay time of NMDA currents is taken to be :math:`\\\\tau_{decay}` =100 ms,\n :math:`a= 0.5 ms^{-1}`, and :math:`\\\\tau_{rise}` =2 ms (Hestrin et al., 1990;\n Spruston et al., 1995).\n\n Parameters\n ----------\n pre : Neurons\n The pre-synaptic neuron group.\n post : Neurons\n The post-synaptic neuron group.\n connection : tuple\n The connection.\n delay : None, float\n The delay length.\n g_max : float\n The maximum conductance.\n E : float\n The reversal potential.\n alpha : float\n beta : float\n cc_Mg : float\n tau_decay : float\n The time constant of decay.\n tau_rise : float\n The time constant of rise.\n a : float\n name : str\n The name of synapse.\n\n Returns\n -------\n synapse : Synapses\n The constructed AMPA synapses.\n \"\"\"\n\n num_pre = pre.num\n num_post = post.num\n var2index = {'x': (2, 0), 's': (2, 1), 'post_V': (1, -1)}\n\n pre_indexes, post_indexes, pre_anchors = connection\n num = len(pre_indexes)\n\n # The first (num_syn, ) variable is ``x``\n # The second (num_syn, ) variable is ``s``\n # The first last (num_post, ) variable is the post-synaptic potential\n state = initial_syn_state(delay, num_pre, num_post, num,\n num_post_shape_var=1, num_syn_shape_var=2)\n\n @integrate()\n def int_x(x, t):\n return -x / tau_rise\n\n @integrate()\n def int_s(s, t, x):\n return -s / tau_decay + a * x * (1 - s)\n\n def update_state(syn_state, t, var_index):\n # get synapse state\n spike = syn_state[0][0]\n post_v = syn_state[1][-1]\n x = syn_state[2][0]\n s = syn_state[2][1]\n # calculate synaptic state\n spike_idx = np.where(spike > 0.)[0]\n for i in spike_idx:\n idx = pre_anchors[:, i]\n x[idx[0]: idx[1]] += 1\n x = int_x(x, t)\n s = int_s(s, t, x)\n syn_state[2][0] = x\n syn_state[2][1] = s\n # get post-synaptic values\n g = np.zeros(num_post)\n for i in range(num_pre):\n idx = pre_anchors[:, i]\n post_idx = post_indexes[idx[0]: idx[1]]\n g[post_idx] += s[idx[0]: idx[1]]\n g_inf = 1 + cc_Mg / beta * np.exp(-alpha * post_v)\n g = g_inf * g\n record_conductance(syn_state, var_index, g)\n\n def output_synapse(syn_state, var_index, post_neu_state):\n output_idx = var_index[-2]\n g_val = syn_state[output_idx[0]][output_idx[1]]\n post_val = - g_max * g_val * (post_neu_state[0] - E)\n post_neu_state[-1] += post_val\n\n def collect_spike(syn_state, pre_neu_state, post_neu_state):\n # spike\n syn_state[0][-1] = pre_neu_state[-3]\n # membrane potential of post-synaptic neuron group\n syn_state[1][-1] = post_neu_state[0]\n\n 
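The wraparound branches in encrypt and decrypt above can be collapsed with modular arithmetic; this is an illustrative alternative, not part of the menu program. Decryption is just a negative key.

def shift_char(ch, key):
    # shift a single character, leaving non-letters untouched
    if ch.islower():
        return chr((ord(ch) - ord('a') + key) % 26 + ord('a'))
    if ch.isupper():
        return chr((ord(ch) - ord('A') + key) % 26 + ord('A'))
    return ch

assert ''.join(shift_char(c, 3) for c in 'Abc xyz') == 'Def abc'
assert ''.join(shift_char(c, -3) for c in 'Def abc') == 'Abc xyz'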
return Synapses(**locals())\n","sub_path":"npbrain/synapses/NMDA_synapses.py","file_name":"NMDA_synapses.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"331028579","text":"from django.conf.urls import patterns, url\n\nEMAIL = \"(?P[-+_\\w\\.@]+)\"\nVERIFICATION_CODE = \"(?P[-_\\w]+)\"\n\nurlpatterns = patterns('',\n url(r'^subscribe_email$', 'publish.views.subscribe_email', name='subscribe_email'),\n url(r'^unsubscribe/'+EMAIL+'$', 'publish.views.unsubscribe_email', name='unsubscribe_email'),\n url(r'^verify/'+EMAIL+'/'+VERIFICATION_CODE+'$', 'publish.views.verify', name='verify'),\n url(r'^spam$', 'publish.views.spam', name='spam'),\n url(r'^bad$', 'publish.views.bad', name='bad'),\n url(r'^subscribe$', 'publish.views.subscribe', name='subscribe'),\n url(r'^manage$', 'publish.views.manage', name='manage'),\n)\n","sub_path":"threepanel/publish/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"238097821","text":"# imports\nimport pygame\nfrom katacka.katacka import Katacka\n\n# Constants\nSURFACE_WIDTH = 1000\nSURFACE_HEIGHT = 700\nFPS = 20\n\n# initializations\npygame.init()\nclock = pygame.time.Clock()\nkatacka = Katacka(SURFACE_WIDTH, SURFACE_HEIGHT)\n\n# Main Loop \nwhile True:\n \n # natural update state\n katacka.update_state()\n \n # action detection\n for event in pygame.event.get():\n katacka.handle_event(event)\n\n # graphics drawing\n katacka.draw_graphics()\n pygame.display.update()\n\n # Refresh rate\n clock.tick(FPS)\n","sub_path":"src/run_katacka.py","file_name":"run_katacka.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"135563942","text":"# This file is part of e-genie\n#\n# e-genie is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# e-genie is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with e-genie. 
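A direct transcription of Eq. (4) from the NMDA docstring above, with defaults mirroring the function signature. Note that update_state multiplies the conductance by (1 + [Mg]o/beta * e^{-alpha*V}) rather than by its reciprocal, so this sketch follows the docstring's definition of g_inf, not the code's.

import numpy as np

def g_infinity(v, cc_mg=1.2, alpha=0.062, beta=3.75):
    # Eq. (4): g_inf(V, [Mg]o) = (1 + e^{-alpha*V} * [Mg]o / beta)^-1
    return 1.0 / (1.0 + np.exp(-alpha * v) * cc_mg / beta)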
If not, see .\n\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom sd_store.models import Sensor, Channel, SensorReading\nfrom graphs.models import PairColour\nfrom egenie.views import RotatingView\nimport datetime\nimport pytz\n\nfrom django.core.urlresolvers import reverse\nfrom deployments.models import Deployment, DeploymentState\nfrom annotations.models import DeploymentAnnotation\nimport datetime\nfrom basicutils.djutils import to_dict\nfrom django.http import Http404, HttpResponseBadRequest\nfrom django.utils.dateparse import parse_datetime\nfrom sd_store.forms import SampledIntervalForm\nfrom sd_store import sdutils\nfrom django.db.models import Sum\nfrom django.http import HttpResponse\nimport json\n\n\nclass AnnotationView(RotatingView):\n \"\"\" Displays sensor readings from all electricity readings\n as line graphs, and lets users add annotations by selecting\n ranges of the graphs.\"\"\"\n # model = Deployment\n template_name = 'graphs/annotation.html'\n\n def get_back_url(self):\n return reverse('home')\n\n def get_context_data(self, **kwargs):\n context = super(AnnotationView, self).get_context_data(\n screen='annotation', **kwargs)\n deployment = context['plinth'].deployment\n dateTo = datetime.datetime.now(tz=pytz.utc) # - datetime.timedelta(days=21)\n dateFrom = dateTo.replace(hour=0, minute=0, second=0, microsecond=0)\n context['dateTo'] = dateTo.strftime(\"%Y-%m-%d %H:%M:%S\")\n context['dateFrom'] = dateFrom.strftime(\"%Y-%m-%d %H:%M:%S\")\n context['deployment'] = deployment\n context['mode'] = 'electricity'\n context['colours'] = PairColour.objects.all()\n context['all_sensors'] = Sensor.objects.filter(\n deployment_details__active=True, position__isnull=False)\n return context\n\n\n# def generate_stats(deployment, sensor, channel, start, end, requested_interval):\n# readings = sdutils.filter_according_to_interval(\n# sensor, channel, start, end, requested_interval, 'generic')\n# values = [reading.value for reading in readings]\n\n# if len(values) == 0:\n# return {}\n# stats_obj = {}\n\n# stats_obj['max'] = round(max(values), 2)\n# stats_obj['min'] = round(min(values), 2)\n# stats_obj['ave'] = round(sum(values) / len(values), 2)\n# if channel.name in ['GASS', 'ELEC']:\n# total_obj = SensorReading.objects.filter(\n# sensor=sensor, channel=channel, timestamp__gte=start, timestamp__lte=end).aggregate(total=Sum('value'))\n# pre_mult = total_obj['total'] / 2\n# cost = 0\n# if channel.name == 'GASS':\n# cost = pre_mult * deployment.gas_pence_per_kwh\n# else:\n# cost = pre_mult * deployment.elec_pence_per_kwh\n# stats_obj['cost'] = cost\n\n# return stats_obj\n\n\ndef get_devices(request, pk):\n \"\"\" JSON data for the annotation view, including annotations, and pairs of sensors\n (sensor name and channel).\"\"\"\n deployment = Deployment.objects.get(pk=pk)\n annotations = DeploymentAnnotation.objects.filter(deployment=pk)\n\n form = SampledIntervalForm(request.GET)\n if not form.is_valid():\n return HttpResponseBadRequest(\"Invalid Parameters\")\n\n requested_interval = form.cleaned_data['sampling_interval']\n start = form.cleaned_data['start']\n end = form.cleaned_data['end']\n\n out = {'sensors': [], 'annotations': [], 'stats': []}\n\n for annotation in annotations:\n obj = to_dict(annotation)\n out['annotations'].append(obj)\n\n for sensorpair in deployment.pairs.all():\n sensor = sensorpair.sensor\n channel = sensorpair.channel\n\n sensor_obj = {}\n sensor_obj['name'] = sensor.name\n # sensor_obj['location'] = 
sensor.deployment_details.filter(deployment__pk=pk)[0].location\n sensor_obj['channels'] = []\n sensor_obj['id'] = sensor.id\n channel_obj = {}\n channel_obj['id'] = channel.id\n channel_obj['name'] = channel.name\n channel_obj['selected'] = False\n if sensorpair.colour.exists():\n channel_obj['colour'] = sensorpair.colour.get().colour\n else:\n channel_obj['colour'] = 'hsla(281,93%,79%,1)'\n\n channel_obj['friendly_name'] = channel.name\n channel_obj['unit'] = channel.unit\n sensor_obj['channels'].append(channel_obj)\n\n out['sensors'].append(sensor_obj)\n return HttpResponse(json.dumps(out), content_type='application/json')\n","sub_path":"src/egenie/graphs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"310126271","text":"import numpy as np\n\ndef GetFreq(fileName):\n file = open(fileName,\"r\")\n text = file.read()\n file.close()\n text = str.lower(text)\n text = text.replace(\" \",\"\")\n text = text.replace(\".\",\"\")\n \n counts = np.zeros(26)\n correlations = np.zeros((26,26))\n \n for i in range(26):\n counts[i] = text.count(chr(97+i))\n if (counts[i] > 0):\n for j in range(26):\n correlations[i,j] = text.count(chr(97+i)+chr(97+j))/float(counts[i])\n \n\n total = float(sum(counts))\n frequencies = counts/total\n total = float(sum(sum(correlations)))\n correlations = correlations/total\n return [frequencies,correlations]\n","sub_path":"project/decoding/GetFrequencies.py","file_name":"GetFrequencies.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"173031628","text":"#!/usr/bin/python3\n\n# addDynamoDBRecords.py\n# ---------------------\n#\n# It would be better to do this in the terraform file, but there is an error in terraform that adding new records will give\n# an error. When this is fixed, this script will be deleted and will become part of the terraform_infra.tf config file.\n#\n# See also: https://github.com/terraform-providers/terraform-provider-aws/issues/12545 and \n# https://github.com/FrederiqueRetsema/TerraformDynamoDBIssue\n#\n# This script is called by init-infra.sh. Don't start it manually, use init-infra.sh for that. \n\nimport sys\nimport boto3\n\n# get_parameters\n# --------------\n\ndef get_parameters():\n\n if (len(sys.argv) != 2):\n print (\"Add the name_prefix and name_postfix as an argument, f.e. 
./addDynamoDBRecords.py AMIS\")\n print (\"This will add the records for 2 shops (AMIS1 and AMIS2) to the database\")\n sys.exit(1)\n\n name_prefix = sys.argv[1]\n\n return {\"name_prefix\": name_prefix}\n\n# add_records\n# -----------\n\ndef add_records(name_prefix):\n\n dynamodb = boto3.client(\"dynamodb\")\n for shop_no in range(2):\n\n dynamodb.put_item(\n TableName= name_prefix + '-shops',\n Item={\n 'shop_id' : {'S': name_prefix + str(shop_no+1)},\n 'record_type' : {'S': 's-00098'},\n 'stock' : {'N': '100000'},\n 'gross_turnover' : {'N': '0'},\n 'gross_number' : {'N': '0'},\n 'item_description' : {'S': '250 g Butter'},\n 'selling_price' : {'N': '2.45'}})\n \n dynamodb.put_item(\n TableName= name_prefix + '-shops',\n Item={\n 'shop_id' : {'S': name_prefix + str(shop_no+1)},\n 'record_type' : {'S': 's-12345'},\n 'stock' : {'N': '100000'},\n 'gross_turnover' : {'N': '0'},\n 'gross_number' : {'N': '0'},\n 'item_description' : {'S': '1 kg Chees'},\n 'selling_price' : {'N': '12.15'}})\n \n dynamodb.put_item(\n TableName= name_prefix + '-shops',\n Item={\n 'shop_id' : {'S': name_prefix + str(shop_no+1)},\n 'record_type' : {'S': 's-81279'},\n 'stock' : {'N': '100000'},\n 'gross_turnover' : {'N': '0'},\n 'gross_number' : {'N': '0'},\n 'item_description' : {'S': '10 Eggs'},\n 'selling_price' : {'N': '1.99'}})\n\n dynamodb.put_item(\n TableName= name_prefix + '-shops',\n Item={\n 'shop_id' : {'S': name_prefix + str(shop_no+1)},\n 'record_type' : {'S': 's-90001'},\n 'stock' : {'N': '100000'},\n 'gross_turnover' : {'N': '0'},\n 'gross_number' : {'N': '0'},\n 'item_description' : {'S': 'Test object smoke- and performancetests (1)'},\n 'selling_price' : {'N': '1'}})\n\n dynamodb.put_item(\n TableName= name_prefix + '-shops',\n Item={\n 'shop_id' : {'S': name_prefix + str(shop_no+1)},\n 'record_type' : {'S': 's-90002'},\n 'stock' : {'N': '100000'},\n 'gross_turnover' : {'N': '0'},\n 'gross_number' : {'N': '0'},\n 'item_description' : {'S': 'Test object smoke- and performancetests (2)'},\n 'selling_price' : {'N': '2'}})\n\n return\n\n# Main program\n# ============\n\nresponse = get_parameters()\nname_prefix = response[\"name_prefix\"]\n\nadd_records(name_prefix)\n\n","sub_path":"shop-3/init-infra/addDynamoDBRecords.py","file_name":"addDynamoDBRecords.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"386086568","text":"import tensorflow as tf\nimport helper\n\n\nimage_shape = (160, 576)\n\ndata_dir = './data'\nruns_dir = './runs'\n\nwith tf.Session() as sess: \n\t#First let's load meta graph and restore weights\n\tsaver = tf.train.import_meta_graph('./save/model.meta')\n\tsaver.restore(sess,tf.train.latest_checkpoint('./save/'))\n\n\tgraph = tf.get_default_graph()\n\timage_input = graph.get_tensor_by_name('image_input:0')\n\tkeep_prob = graph.get_tensor_by_name('keep_prob:0')\n\tlogits = graph.get_tensor_by_name('logits:0')\n \n\thelper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, image_input)\n\n\n\n","sub_path":"restore.py","file_name":"restore.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"556611916","text":"from functools import partial\n\nimport numpy as np\nfrom scipy.special import erf\nfrom scipy.stats import gamma\n\nfrom .parameters import FixedParameter, AdaproxParameter, DEFAULT_FACTOR\nfrom .frame import CartesianFrame, EllipseFrame\nfrom ..bbox import 
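A hedged alternative to the per-item put_item calls in add_records above: boto3's resource-level batch_writer buffers writes into BatchWriteItem requests, cutting round trips. The table name and item values are examples only, and AWS credentials/region are assumed to be configured.

import boto3

table = boto3.resource("dynamodb").Table("AMIS-shops")  # hypothetical name
with table.batch_writer() as batch:
    batch.put_item(Item={
        "shop_id": "AMIS1",
        "record_type": "s-00098",
        "stock": 100000,
        "gross_turnover": 0,
        "gross_number": 0,
    })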
overlapped_slices\nfrom ..parameter import relative_step\n\n\n# Some operations fail at the origin in radial coordinates,\n# so we make use of a very small offset.\nMIN_RADIUS = 1e-20\n\n# Useful constants\nSQRT_PI_2 = np.sqrt(np.pi/2)\n\n# Stored sersic constants\nSERSIC_B1 = gamma.ppf(0.5, 2)\n\n\ndef gaussian2d(params, ellipse):\n \"\"\"Model of a 2D elliptical gaussian\n\n Parameters\n ----------\n params: `numpy.ndarray`\n The parameters of the function.\n In this case there are none outside of the ellipticity\n ellipse: `EllipseFrame`\n The ellipse parameters to scale the radius in all directions.\n\n Returns\n -------\n result: `numpy.ndarray`\n The 2D guassian for the given ellipse parameters\n \"\"\"\n return np.exp(-ellipse.R2)\n\n\ndef grad_gaussian(input_grad, params, cls, morph, sed, ellipse):\n \"\"\"Gradient of the the component model wrt the Gaussian morphology parameters\n\n Parameters\n ----------\n input_grad: `numpy.ndarray`\n Gradient of the likelihood wrt the component model\n params: `numpy.ndarray`\n The parameters of the morphology.\n cls: `LiteComponent`\n The component of the model that contains the morphology.\n morph: `numpy.ndarray`\n The model of the morphology.\n sed: `numpy.ndarray`\n The model of the SED.\n ellipse: `EllipseFrame`\n The ellipse parameters to scale the radius in all directions.\n \"\"\"\n # Calculate the gradient of the likelihod\n # wrt the Gaussian e^-r**2\n _grad = np.zeros(cls.bbox.shape, dtype=morph.dtype)\n _grad[cls.slices[1]] = input_grad[cls.slices[0]]\n _grad = -morph*np.einsum(\"i,i...\", sed, _grad)\n dY0 = ellipse.grad_y0(_grad, True)\n dX0 = ellipse.grad_x0(_grad, True)\n dSigmaY = ellipse.grad_major(_grad, True)\n dSigmaX = ellipse.grad_minor(_grad, True)\n dTheta = ellipse.grad_theta(_grad, True)\n return np.array([dY0, dX0, dSigmaY, dSigmaX, dTheta], dtype=params.dtype)\n\n\ndef circular_gaussian(center, frame, sigma):\n \"\"\"Model of a circularly symmetric Gaussian\n\n Parameters\n ----------\n center: `numpy.ndarray`\n The center of the Gaussian.\n frame: `CartesianFrame`\n The frame in which to generate the image of the circular Gaussian\n sigma: `float`\n The standard deviation.\n\n Returns\n -------\n result: `numpy.ndarray`\n The image of the circular Gaussian.\n \"\"\"\n y0, x0 = center[:2]\n two_sigma = 2*sigma\n r2 = ((frame.X - x0)/two_sigma)**2 + ((frame.Y - y0)/two_sigma)**2\n return np.exp(-r2)\n\n\ndef grad_circular_gaussian(input_grad, params, cls, morph, sed, frame, sigma):\n \"\"\"Gradient of the the component model wrt the Gaussian morphology parameters\n\n Parameters\n ----------\n input_grad: `numpy.ndarray`\n Gradient of the likelihood wrt the component model\n params: `numpy.ndarray`\n The parameters of the morphology.\n cls: `LiteComponent`\n The component of the model that contains the morphology.\n morph: `numpy.ndarray`\n The model of the morphology.\n sed: `numpy.ndarray`\n The model of the SED.\n frame: `CartesianFrame`\n The frame in which to generate the image of the circular Gaussian.\n \"\"\"\n # Calculate the gradient of the likelihod\n # wrt the Gaussian e^-r**2\n _grad = np.zeros(cls.bbox.shape, dtype=morph.dtype)\n _grad[cls.slices[1]] = input_grad[cls.slices[0]]\n _grad = -morph*np.einsum(\"i,i...\", sed, _grad)\n\n y0, x0 = params[:2]\n dY0 = -2*np.sum((frame.Y - y0)*_grad)\n dX0 = -2*np.sum((frame.X - x0)*_grad)\n return np.array([dY0, dX0], dtype=params.dtype)\n\n\ndef integrated_gaussian(params, frame):\n \"\"\"Model of a circularly symmetric Gaussian integrated over pixels\n\n This 
differs from `circularGaussian` because the gaussian function\n is integrated over each pixel to replicate the pixelated image\n version of a Gaussian function.\n\n Parameters\n ----------\n params: `numpy.ndarray`\n The center of the Gaussian.\n frame: `CartesianFrame`\n The frame in which to generate the image of the circular Gaussian\n\n Returns\n -------\n result: `numpy.ndarray`\n The image of the circular Gaussian.\n \"\"\"\n # Unpack the parameters and define constants\n y0, x0, sigma = params\n r = np.sqrt((frame.X - x0)**2 + (frame.Y - y0)**2)\n sqrt_c = 1/np.sqrt(2)/sigma\n # Integrate from half a pixel left and right\n lhs = erf((r - 0.5)*sqrt_c)\n rhs = erf((r + 0.5)*sqrt_c)\n z = 0.5*np.sqrt(np.pi)/sqrt_c*(rhs - lhs)\n return z\n\n\ndef grad_integrated_gaussian(input_grad, params, cls, morph, sed, frame):\n \"\"\"Gradient of the the component model wrt the Gaussian morphology parameters\n\n Parameters\n ----------\n input_grad: `numpy.ndarray`\n Gradient of the likelihood wrt the component model\n params: `numpy.ndarray`\n The parameters of the morphology.\n cls: `LiteComponent`\n The component of the model that contains the morphology.\n morph: `numpy.ndarray`\n The model of the morphology.\n sed: `numpy.ndarray`\n The model of the SED.\n frame: `CartesianFrame`\n The frame in which to generate the image of the circular Gaussian.\n \"\"\"\n # Calculate the gradient of the likelihood\n # wrt the Gaussian e^-r**2\n _grad = np.zeros(cls.bbox.shape, dtype=morph.dtype)\n _grad[cls.slices[1]] = input_grad[cls.slices[0]]\n _grad = np.einsum(\"i,i...\", sed, _grad)\n\n # Extract the parameters\n y0, x0, sigma = params\n # define useful constants\n x = frame.X - x0\n y = frame.Y - y0\n c = 0.5/sigma**2\n sqrt_c = np.sqrt(c)\n # Add a small constant to the radius to prevent a divergence at r==0\n r = np.sqrt(x**2 + y**2 + MIN_RADIUS)\n # Shift half a pixel in each direction for the integration\n r1 = r - 0.5\n r2 = r + 0.5\n # Calculate the gradient of the ERF wrt. each shifted radius\n dModel1 = np.exp(-c*r1**2)\n dModel2 = np.exp(-c*r2**2)\n # Calculate the gradients of the parameters\n dX0 = np.sum(-x/r*(dModel2 - dModel1)*_grad)\n dY0 = np.sum(-y/r*(dModel2 - dModel1)*_grad)\n dSigma1 = -(r1*dModel1/sigma - SQRT_PI_2*erf(r1*sqrt_c))\n dSigma2 = -(r2*dModel2/sigma - SQRT_PI_2*erf(r2*sqrt_c))\n dSigma = np.sum((dSigma2 - dSigma1)*_grad)\n\n return np.array([dY0, dX0, dSigma])\n\n\ndef bounded_prox(params, prox_step, proxmin, proxmax):\n \"\"\"A bounded proximal operator\n\n This function updates `params` in place.\n\n Parameters\n ----------\n params: `numpy.ndarray`\n The array of parameters to constrain.\n prox_step: `float`\n A scaling parameter used in some proximal operators\n in proxmin, but ignored here.\n proxmin: `numpy.ndarray`\n The array of minimum values for each parameter.\n proxmax: `numpy.ndarray`\n The array of maximum values for each parameter.\n\n Returns\n -------\n result: `numpy.ndarray`\n The updated parameters.\n \"\"\"\n cuts = params < proxmin\n params[cuts] = proxmin[cuts]\n cuts = params > proxmax\n params[cuts] = proxmax[cuts]\n return params\n\n\ndef sersic(params, ellipse):\n \"\"\"Generate a Sersic Model\n\n Parameters\n ----------\n params: `numpy.ndarray`\n The parameters of the function.\n In this case the only parameter is the sersic index ``n``.\n n: `float`\n The seric index. 
To avoid having too many\n degrees of freedom, we do not attempt to fit n,\n and typically use either `n=0` (exponential/disk profile) or\n `n=4` (de Vaucouleurs profile).\n ellipse: `EllipseFrame`\n The ellipse parameters to scale the radius in all directions.\n\n Returns\n -------\n result: `numpy.ndarray`\n The 2D guassian for the given ellipse parameters\n \"\"\"\n n, = params\n\n r = ellipse.R\n\n if n == 1:\n result = np.exp(-SERSIC_B1*r)\n else:\n bn = gamma.ppf(0.5, 2*n)\n result = np.exp(-bn*(r**(1/n) - 1))\n return result\n\n\ndef grad_sersic(input_grad, params, cls, morph, sed, ellipse):\n \"\"\"Gradient of the component model wrt the Gaussian morphology parameters\n\n Parameters\n ----------\n input_grad: `numpy.ndarray`\n Gradient of the likelihood wrt the component model\n params: `numpy.ndarray`\n The parameters of the morphology.\n cls: `LiteComponent`\n The component of the model that contains the morphology.\n morph: `numpy.ndarray`\n The model of the morphology.\n sed: `numpy.ndarray`\n The model of the SED.\n ellipse: `EllipseFrame`\n The ellipse parameters to scale the radius in all directions.\n \"\"\"\n n = params[5]\n bn = gamma.ppf(0.5, 2*n)\n if n == 1:\n # Use a simplified model for faster calculation\n dExp = -SERSIC_B1*morph\n else:\n r = ellipse.R\n dExp = -bn/n*morph*r**(1 / n - 1)\n\n _grad = np.zeros(cls.bbox.shape, dtype=morph.dtype)\n _grad[cls.slices[1]] = input_grad[cls.slices[0]]\n _grad = np.einsum(\"i,i...\", sed, _grad)\n dN = np.sum(_grad*bn*morph*ellipse.R**(1/n)*np.log10(ellipse.R)/n**2)\n _grad = _grad*dExp\n dY0 = ellipse.grad_y0(_grad, False)\n dX0 = ellipse.grad_x0(_grad, False)\n dSigmaY = ellipse.grad_major(_grad, False)\n dSigmaX = ellipse.grad_minor(_grad, False)\n dTheta = ellipse.grad_theta(_grad, False)\n return np.array([dY0, dX0, dSigmaY, dSigmaX, dTheta, dN], dtype=params.dtype)\n\n\nclass ParametricComponent:\n \"\"\"A parametric model of an astrophysical source\n \"\"\"\n\n def __init__(self, sed, morph_params, morph_func, morph_grad, morph_prox,\n morph_step, model_frame, bbox, prox_sed=None, floor=1e-20):\n \"\"\"Initialize the component\n\n Parameters\n ----------\n sed: `numpy.ndarray`\n The SED of the component.\n morph_params: `numpy.ndarray`\n The parameters of the morphology.\n morph_func: `Callable`\n The function to generate the 2D morphology image\n based on `morphParams`.\n morph_grad: `Callable`\n The function to calculate the gradient of the\n likelihood wrt the morphological parameters.\n morph_prox: `Callable`\n The proximal operator for the morphology parameters.\n bbox: `scarlet.bbox.Box`\n The bounding box that holds the model.\n frame: `CartesianGrid`\n The coordinates of the model frame,\n used to speed up the creation of the\n polar grid for each source.\n prox_sed: `Function`\n Proximal operator for the SED.\n If `prox_sed` is `None` then the default proximal\n operator `self.prox_sed` is used.\n floor: `float`\n The minimum value of the SED, used to prevent\n divergences in the gradients.\n \"\"\"\n params = FixedParameter(morph_params)\n sed = FixedParameter(sed)\n\n self._params = params\n self._func = morph_func\n self._morph_grad = morph_grad\n self._morph_prox = morph_prox\n self._morph_step = morph_step\n self._sed = sed\n self._bbox = bbox\n if prox_sed is None:\n self._prox_sed = self.prox_sed\n else:\n self._prox_sed = prox_sed\n self.slices = overlapped_slices(model_frame.bbox, bbox)\n self.floor = floor\n\n @property\n def center(self):\n \"\"\"The center of the component\"\"\"\n return self.y0, 
self.x0\n\n @property\n def y0(self):\n \"\"\"The y-center of the component\"\"\"\n return self._params.x[0]\n\n @property\n def x0(self):\n \"\"\"The x-center of the component\"\"\"\n return self._params.x[1]\n\n @property\n def sed(self):\n \"\"\"The SED of the component\"\"\"\n return self._sed.x\n\n @property\n def bbox(self):\n \"\"\"The bounding box that contains the component\"\"\"\n return self._bbox\n\n @property\n def frame(self):\n \"\"\"The coordinate system that contains the model\"\"\"\n return CartesianFrame(self._bbox)\n\n @property\n def radial_params(self):\n \"\"\"The parameters used to model the radial function\"\"\"\n return self._params.x\n\n def _morph(self, frame=None):\n \"\"\"The 2D image of the morphology\n\n This callable generates an image of the morphology\n in the given frame.\n\n\n Parameters\n ----------\n frame: `CartesianFrame`\n The frame (bounding box, pixel grid) that the image is\n placed in.\n\n Returns\n -------\n result: `numpy.ndarray`\n The image of the morphology in the `frame`.\n \"\"\"\n if frame is None:\n frame = self.frame\n return self._func(self.radial_params, frame)\n\n @property\n def morph(self, frame=None):\n \"\"\"The morphological model\"\"\"\n return self._morph()\n\n @property\n def morph_prox(self):\n \"\"\"The function used to constrain the morphological model\"\"\"\n return self._morph_prox\n\n @property\n def morph_grad(self):\n \"\"\"The function that calculates the gradient of the morphological model\"\"\"\n return self._morph_grad\n\n @property\n def morph_step(self):\n \"\"\"The function that calculates the gradient of the morphological model\"\"\"\n return self._morph_step\n\n def get_model(self, bbox=None, frame=None):\n \"\"\"Generate the full model for this component\"\"\"\n model = self.sed[:, None, None] * self._morph(frame)[None, :, :]\n\n if bbox is not None:\n slices = overlapped_slices(bbox, self.bbox)\n _model = np.zeros(bbox.shape, self.morph.dtype)\n _model[slices[0]] = model[slices[1]]\n model = _model\n return model\n\n def prox_sed(self, sed, prox_step=0):\n \"\"\"Apply a prox-like update to the SED\n\n Parameters\n ----------\n sed: `numpy.ndarray`\n The SED of the model.\n prox_step: `float`\n A scaling parameter used in some proximal operators,\n but ignored here.\n \"\"\"\n # prevent divergent SED\n sed[sed < self.floor] = self.floor\n return sed\n\n def grad_sed(self, input_grad, sed, morph):\n \"\"\"Gradient of the SED wrt. the component model\n\n Parameters\n ----------\n input_grad: `numpy.ndarray`\n Gradient of the likelihood wrt the component model\n sed: `numpy.ndarray`\n The model of the SED.\n morph: `numpy.ndarray`\n The model of the morphology.\n\n Returns\n -------\n result: `float`\n The gradient of the likelihood wrt. the SED.\n \"\"\"\n _grad = np.zeros(self.bbox.shape, dtype=self.sed.dtype)\n _grad[self.slices[1]] = input_grad[self.slices[0]]\n return np.einsum(\"...jk,jk\", _grad, morph)\n\n def update(self, it, input_grad):\n \"\"\"Update the component parameters from an input gradient\n\n Parameters\n ----------\n it: `int`\n The current iteration of the optimizer.\n input_grad: `numpy.ndarray`\n Gradient of the likelihood wrt the component model\n \"\"\"\n sed = self.sed.copy()\n morph = self.morph\n self._sed.update(it, input_grad, morph)\n self._params.update(it, input_grad, self, morph, sed, self.frame)\n\n def resize(self):\n \"\"\"Resize the box that contains the model\n\n Not yet implemented, so for now the model box\n does not grow. 
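A minimal numerical illustration of the gaussian2d model defined earlier in this file, for the axis-aligned case (theta = 0); the grid size and widths are invented. R2 is the squared elliptical radius, so exp(-R2) peaks at the center and falls off anisotropically.

import numpy as np

y, x = np.mgrid[:51, :51]
y0 = x0 = 25.0
sigma_y, sigma_x = 8.0, 3.0  # illustrative major/minor widths
r2 = ((y - y0) / sigma_y) ** 2 + ((x - x0) / sigma_x) ** 2
model = np.exp(-r2)
assert model[25, 25] == 1.0           # unit peak at the center
assert model[30, 25] > model[25, 30]  # slower falloff along the major (y) axis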
In the long run this will be\n based on a cutoff value for the model.\n \"\"\"\n return False\n\n def init_adaprox(self, noise_rms, max_prox_iter=1, factor=10):\n \"\"\"Convert all of the parameters into adaprox parameters\n\n Parameters\n ----------\n noise_rms: `numpy.ndarray`\n The RMS noise in each band.\n max_prox_iter: `int`\n Maximum number of proximal iterations.\n factor: `int`\n The factor to scale the noise to set the\n SED step.\n \"\"\"\n self._sed = AdaproxParameter(\n self._sed.x,\n step=partial(relative_step, factor=DEFAULT_FACTOR, minimum=noise_rms/factor),\n max_prox_iter=max_prox_iter,\n prox=self._prox_sed,\n grad=self.grad_sed,\n )\n self._params = AdaproxParameter(\n self._params.x,\n step=self.morph_step,\n max_prox_iter=max_prox_iter,\n prox=self.morph_prox,\n grad=self.morph_grad,\n )\n\n\nclass EllipticalParametricComponent(ParametricComponent):\n \"\"\"A radial density/surface brightness profile with elliptical symmetry\n \"\"\"\n\n def __init__(self, sed, morph_params, morph_func, morph_grad, morph_prox, morph_step,\n bbox, model_frame, prox_sed=None, floor=1e-20):\n \"\"\"Initialize the component\n\n Parameters\n ----------\n sed: `numpy.ndarray`\n The SED of the component.\n morph_params: `numpy.ndarray`\n The parameters passed to `morph_func` to\n generate the morphology in image space.\n morph_func: `Function`\n The function to generate the morphology\n based on `morphParams`.\n morph_grad: `Function`\n The function to calculate the gradient of the\n likelihood wrt the morphological parameters.\n morph_prox: `Function`\n The proximal operator for the morphology parameters.\n bbox: `scarlet.bbox.Box`\n The bounding box that holds the model.\n frame: `CartesianGrid`\n The coordinates of the model frame,\n used to speed up the creation of the\n polar grid for each source.\n prox_sed: `Function`\n Proximal operator for the SED.\n If `prox_sed` is `None` then the default proximal\n operator `self.prox_sed` is used.\n floor: `float`\n The minimum value of the SED, used to prevent\n divergences in the gradients.\n \"\"\"\n super().__init__(\n sed=sed,\n morph_params=morph_params,\n morph_func=morph_func,\n morph_grad=morph_grad,\n morph_prox=morph_prox,\n morph_step=morph_step,\n model_frame=model_frame,\n bbox=bbox,\n prox_sed=prox_sed,\n floor=floor\n )\n\n @property\n def semi_major(self):\n \"\"\"The length of the semi-major axis of the model\"\"\"\n return self._params.x[2]\n\n @property\n def semi_minor(self):\n \"\"\"The length of the semi-minor axis of the model\"\"\"\n return self._params.x[3]\n\n @property\n def theta(self):\n \"\"\"The counter-clockwise rotation angle of the model from the x-axis.\"\"\"\n return self._params.x[4]\n\n @property\n def ellipse_params(self):\n \"\"\"The parameters used to generate the scaled radius\"\"\"\n return self._params.x[:5]\n\n @property\n def radial_params(self):\n \"\"\"The parameters used to model the radial function\"\"\"\n return self._params.x[5:]\n\n @property\n def frame(self):\n \"\"\"The `EllipseFrame` that parameterizes the model\"\"\"\n return EllipseFrame(*self.ellipse_params, self._bbox)\n\n @property\n def morph_prox(self):\n \"\"\"The function used to constrain the morphological model\"\"\"\n return self._morph_prox\n\n @property\n def morph_grad(self):\n \"\"\"The function that calculates the gradient of the morphological model\"\"\"\n return self._morph_grad\n\n def update(self, it, input_grad):\n \"\"\"Update the component\n\n Parameters\n ----------\n it: `int`\n The current iteration of the 
\n\n def update(self, it, input_grad):\n \"\"\"Update the component\n\n Parameters\n ----------\n it: `int`\n The current iteration of the optimizer.\n input_grad: `numpy.ndarray`\n Gradient of the likelihood wrt the component model\n \"\"\"\n ellipse = self.frame\n sed = self.sed.copy()\n morph = self._func(self.radial_params, ellipse)\n self._sed.update(it, input_grad, morph)\n self._params.update(it, input_grad, self, morph, sed, ellipse)\n\n def resize(self):\n \"\"\"Resize the box that contains the model\n\n Not yet implemented, so for now the model box\n does not grow. In the long run this will be\n based on a cutoff value for the model.\n \"\"\"\n return False","sub_path":"scarlet/lite/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":20543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"443609096","text":"import csv\n\n# with open('numberfy-input.txt', encoding = \"ISO-8859-1\") as csv_file:\n# csv_reader = csv.reader(csv_file, delimiter='\\n')\n# arr = []\n# for r in csv_reader: \n# s = r[0]\n# arr[i].split(' (')\n# arr[1]=arr[1][:-1]\n\nwith open('listing-type-tm-input.txt') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter='\\n')\n arr = []\n i=0\n total = 0\n\n for r in csv_reader: \n x = r[0].split(' (')\n arr.append(x)\n arr[i][1] = arr[i][1][:-1]\n i+=1\n\n \n with open ('listing-type-tm-output.txt', 'w') as f:\n f.write('LISTING TYPES: \\n\\n')\n for key in arr:\n f.write(key[0] + ': ' + key[1] + '\\n')\n total = total + int(key[1])\n f.write('\\n\\n' + 'RESULTS FOR TRACKING MASTER \\n')\n f.write('Total Listings: ' + str(total) + '\\n')\n # print(arr[4])\n f.write('Event Listings (700+): ' + str(arr[4][1]) +'\\n')\n # print(arr[2])\n f.write('Business Events Listings (70+): ' + str(arr[2][1]) + '\\n')\n print('business events: ' + str(arr[2][1]) + ', events: ' + str(arr[4][1]) + ', movies: ' + str(arr[7][1]) + ', workshops: ' + str(arr[13][1]))\n permanentListings = int(total) - int(arr[2][1]) - int(arr[4][1]) - int(arr[7][1]) - int(arr[13][1])\n f.write('Non-Events/Permanent Listings: ' + str(permanentListings) + '\\n')\n\n\nprint('''\\n\\nInput Order Example: \\n\nAccommodation\nArt Galleries, Museums & Libraries\nBusiness Events\nClubs & Associations\nEvents\nFood & Drink\nFunction Venues\nMovies\nPlaces To Go\nShopping\nThings To Do\nTravel Services\nWineries\nWorkshops and Short Courses''')\n # f.write()\n \n\n\n","sub_path":"python/listing-type-tm/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"573105562","text":"# Train two simple deep CNNs on the Yelp small images (64 x 64 pixels) dataset in the Griffin cluster.\n# Based on the Keras examples by Francois Chollet, available at:\n# https://github.com/fchollet/keras/blob/master/examples/cifar10_cnn.py\n# GPU Usage: `THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python convnet_griffin.py`\nfrom __future__ import print_function\n\nimport numpy as np\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.models import Sequential\nfrom keras.optimizers import SGD\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom memload.load_yelp_data import yelp_data\n\nMODELS = ['VGG_16', 'VGG_19', 'googlenet', 'inception_v3']\n\ndef train():\n model_ = 'VGG_16'\n batch_size = 8\n nb_classes = 5\n nb_epoch = 200\n data_augmentation = True\n\n # input image dimensions\n if model_ in MODELS[0:2]:\n img_rows, img_cols = 224, 224\n if model_ in
 MODELS[3:4]:\n img_rows, img_cols = 299, 299\n # the Yelp images are RGB\n img_channels = 3\n\n # the data, shuffled and split between train and test sets\n (X_train, y_train), (X_test, y_test) = yelp_data(dtype=np.float32, grayscale=False, pixels=img_rows, batches=3,\n model=model_, data_dir='/home/rcamachobarranco/datasets')\n print('X_train shape:', X_train.shape)\n print(X_train.shape[0], 'train samples')\n print(X_test.shape[0], 'test samples')\n\n # generate model\n model = VGG_16(img_rows, img_cols, img_channels, nb_classes)\n\n # let's train the model using SGD + momentum\n sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(loss='categorical_crossentropy', optimizer=sgd)\n\n if not data_augmentation:\n print('Not using data augmentation.')\n model.fit(X_train, y_train, batch_size=batch_size,\n nb_epoch=nb_epoch, show_accuracy=True,\n validation_data=(X_test, y_test), shuffle=True)\n else:\n print('Using real-time data augmentation.')\n\n # this will do preprocessing and realtime data augmentation\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n vertical_flip=False) # randomly flip images\n\n # compute quantities required for featurewise normalization\n # (std, mean, and principal components if ZCA whitening is applied)\n datagen.fit(X_train)\n\n # fit the model on the batches generated by datagen.flow()\n model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),\n samples_per_epoch=X_train.shape[0],\n nb_epoch=nb_epoch, show_accuracy=True,\n validation_data=(X_test, y_test),\n nb_worker=1)
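
Note on the dimension selection above: testing `model_ in MODELS[3]` would have been a substring check against the string 'inception_v3', which is why the one-element slice `MODELS[3:4]` is used instead. A dict lookup avoids the pitfall entirely; a small sketch reusing the names from this script (the 224/299 values follow the commonly published input sizes for these architectures):

```python
INPUT_DIMS = {
    'VGG_16': (224, 224),
    'VGG_19': (224, 224),
    'googlenet': (224, 224),
    'inception_v3': (299, 299),
}

def input_dims(model_name):
    # raise on unknown names instead of silently leaving dims undefined
    try:
        return INPUT_DIMS[model_name]
    except KeyError:
        raise ValueError('unknown model: %s' % model_name)

print(input_dims('VGG_16'))        # (224, 224)
print(input_dims('inception_v3'))  # (299, 299)
```
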
\n\n\n# Define the VGG-16 model structure\ndef VGG_16(img_rows, img_cols, img_channels=3, nb_classes=5, weights_path=None):\n model = Sequential()\n model.add(ZeroPadding2D((1, 1), input_shape=(img_channels, img_rows, img_cols)))\n model.add(Convolution2D(64, 3, 3, activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(64, 3, 3, activation='relu'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, 3, 3, activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(128, 3, 3, activation='relu'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(256, 3, 3, activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(256, 3, 3, activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(256, 3, 3, activation='relu'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(512, 3, 3, activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(512, 3, 3, activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(512, 3, 3, activation='relu'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(512, 3, 3, activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(512, 3, 3, activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Convolution2D(512, 3, 3, activation='relu'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n\n model.add(Flatten())\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(nb_classes, activation='softmax'))\n\n if weights_path:\n model.load_weights(weights_path)\n\n return model\n\n\n# Define the simple CIFAR10-based model\ndef CIFAR_10(img_rows, img_cols, img_channels=3, nb_classes=5, weights_path=None):\n model = Sequential()\n\n model.add(Convolution2D(32, 3, 3, border_mode='same',\n input_shape=(img_channels, img_rows, img_cols)))\n model.add(Activation('relu'))\n model.add(Convolution2D(32, 3, 3))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Convolution2D(64, 3, 3, border_mode='same'))\n model.add(Activation('relu'))\n model.add(Convolution2D(64, 3, 3))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n\n if weights_path:\n model.load_weights(weights_path)\n\n return model\n\n\ntrain()\n","sub_path":"convnets/convnet_keras_griffin.py","file_name":"convnet_keras_griffin.py","file_ext":"py","file_size_in_byte":6551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"5885505","text":"\"\"\" This should contain some nice functions and methods for dealing\nwith the repository structure \"\"\"\n\nimport os, sys\nimport kimservice\nfrom config import *\nlogger = logger.getChild(\"repository\")\n\n#======================================\n# Some kim api wrapped things\n#======================================\n\ndef valid_match(test,model,force=False):\n \"\"\" Test to see if a test and model match using the kim API, returns bool \"\"\"\n #logger.debug(\"attempting to match %r with %r\",testname,modelname)\n logger.debug(\"invoking KIMAPI for (%r,%r)\",test,model)\n pid = os.fork()\n if (pid==0):\n logger.debug(\"in fork\")\n match, pkim = kimservice.KIM_API_init(test.kim_code,model.kim_code)\n if match:\n kimservice.KIM_API_free(pkim)\n sys.exit(0)\n sys.exit(1)\n\n # try to get the exit code from the kim api process\n exitcode = os.waitpid(pid,0)[1] // 256\n logger.debug(\"got exitcode: %r\" , exitcode )\n if exitcode == 0:\n match = True\n elif exitcode == 1:\n match = False\n else:\n logger.error(\"We seem to have a Kim init error on (%r,%r)\", test, model)\n raise KIMRuntimeError\n\n if match:\n return True\n else:\n return False\n\n\n","sub_path":"dump/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
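
The `valid_match` helper above runs the KIM API check in a forked child and recovers its exit code from `os.waitpid`; the `// 256` division extracts the high byte of the 16-bit status word, for which `os.WEXITSTATUS` is the portable spelling. A minimal POSIX-only sketch of the same pattern:

```python
import os
import sys

pid = os.fork()
if pid == 0:
    # child: exit with a known status code
    sys.exit(3)

# parent: waitpid returns (pid, status); the exit code lives in the high byte
_, status = os.waitpid(pid, 0)
assert status // 256 == 3
assert os.WEXITSTATUS(status) == 3
```
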
 +{"seq_id":"305553615","text":"#Simple calculator\nprint (\"(c) Maximilian Spiekermann, 2016\")\n\n#Main function\ndef main ():\n #The user can enter some text\n userinput = input (\"\")\n if (userinput == \"\"):\n print (\"Please enter something!\")\n main ()\n else:\n #Replace invalid operators\n userinput = userinput.replace (':', '/')\n userinput = userinput.replace (',', '.')\n userinput = userinput.replace ('^', '**')\n userinput = userinput.replace ('=', '==')\n\n #Try to evaluate the input\n try:\n #Evaluate\n resultAsInt = eval (userinput)\n #Convert the result to a variable of type 'String'\n resultAsStr = str(resultAsInt)\n #Translate the output\n resultAsStr = resultAsStr.replace('True', 'That is true.')\n resultAsStr = resultAsStr.replace('False', 'That is false.')\n #Display the result\n print (resultAsStr)\n #If the input is invalid\n except NameError:\n print (\"Please do not use any invalid letters!\")\n except SyntaxError:\n print (\"Please use only valid operators!\")\n except ZeroDivisionError:\n print (\"You cannot divide by '0'!\")\n #In case of any other error ('finally' would also run on success)\n except Exception:\n pass\n #Repeat the function \n main ()\n\n#Run main() for the first time \ntry:\n main ()\n#In case of an unknown error\nexcept Exception:\n print (\"An error occurred. Please try again.\\n\")\n main ()\n","sub_path":"Python/Calculator v1.py","file_name":"Calculator v1.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"650153034","text":"\"\"\"\n600. Smallest Rectangle Enclosing Black Pixels\nhttps://www.lintcode.com/problem/smallest-rectangle-enclosing-black-pixels/description\nApproach from the Jiuzhang (NineChapter) algorithm course\n\n\"\"\"\nclass Solution:\n \"\"\"\n @param image: a binary matrix with '0' and '1'\n @param x: the location of one of the black pixels\n @param y: the location of one of the black pixels\n @return: an integer\n \"\"\"\n def minArea(self, image, x, y):\n # write your code here\n if not image or not image[0]:\n return 0\n n = len(image)\n m = len(image[0])\n\n left = self.binary_search_first(image, 0, y, self.check_col)\n right = self.binary_search_last(image, y, m - 1, self.check_col)\n up = self.binary_search_first(image, 0, x, self.check_row)\n down = self.binary_search_last(image, x, n - 1, self.check_row)\n\n print(left, right, up, down)\n return (right - left + 1) * (down - up + 1)\n\n def binary_search_first(self, image, start, end, check_func):\n while start + 1 < end:\n mid = (start + end) // 2\n if check_func(image, mid):\n end = mid\n else:\n start = mid\n if check_func(image, start):\n return start\n return end\n\n def binary_search_last(self, image, start, end, check_func):\n while start + 1 < end:\n mid = (start + end) // 2\n if check_func(image, mid):\n start = mid\n else:\n end = mid\n if check_func(image, end):\n return end\n return start\n\n def check_row(self, image, mid):\n m = len(image[0])\n for i in range(m):\n if image[mid][i] == '1':\n return True\n return False\n\n def check_col(self, image, mid):\n n = len(image)\n for i in range(n):\n if image[i][mid] == '1':\n return True\n return False\n","sub_path":"lintcode/600.py","file_name":"600.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"437584802","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport copy, math, random\n\nfrom utils import *\n\ndef simulated_annealing(problem, state):\n current = Queen(state) if problem == '8-queens' else Jigsaw(state)\n\n t = 30\n\n while True:\n if t == 0:\n return current\n\n for i in range(0, 300):\n neighbor = current.get_random_neighbor()\n delta = neighbor.price - current.price\n\n if delta < 0:\n current = copy.deepcopy(neighbor)\n if current.price == 0:\n return current\n else:\n if math.exp((-delta) / t) > random.random():\n current = 
copy.deepcopy(neighbor)\n\n t -= 1\n\n return current\n\ndef main():\n problem = parse()\n\n samples = []\n with open('samples.in', 'r') as f:\n samples = f.readlines()\n f.close()\n\n handle(problem, samples, simulated_annealing)\n\nif __name__ == '__main__':\n main()","sub_path":"local_search/simulated_annealing.py","file_name":"simulated_annealing.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"188100697","text":"#!/usr/bin/env python\n\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nSimple Message feedback publisher. The server waits for connection and then starts publishing roboto feedback information.\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom sys import stdout\nfrom twisted.python.log import startLogging\nfrom twisted.internet import interfaces, reactor, task, defer, protocol\nfrom twisted.internet.protocol import Factory, Protocol\nfrom twisted.internet.endpoints import TCP4ServerEndpoint\nimport construct as c2\nimport simple_message as sm\nfrom twisted.protocols.basic import LineReceiver\n\n\n\nclass feedbackPublisher(Protocol):\n def __init__(self):\n self.lc = task.LoopingCall(self.FeedbackMessage)\n\n\n def connectionMade(self):\n print('Connection made from {}'.format(self.transport.getPeer()))\n self.lc.start(0.5)\n\n def connectionLost(self, reason):\n print('Connection lost from {}'.format(self.transport.getPeer()))\n self.lc.stop()\n\n\n\n def dataReceived(self):\n print(\"connect\")\n\n def FeedbackMessage(self):\n joint_1 = 0.0\n joint_2 = 0.0\n joint_3 = 0.0\n joint_4 = 0.0\n joint_5 = 0.0\n joint_6 = 0.0\n\n SimpleMessage = c2.Struct(\n 'Header' / c2.Struct(\n 'msg_type' / c2.Int32sl,\n 'comm_type' / c2.Int32sl,\n 'reply_type' / c2.Int32sl,\n ),\n 'body' / c2.Struct(\n 'robot_id' / c2.Int32sl,\n 'valid_fields'/ c2.Int32sl,\n 'time' / c2.Float32l,\n 'positions' / c2.Float32l[10],\n 'velocities' / c2.Float32l[10],\n 'accelerations' / c2.Float32l[10],\n ),\n c2.Terminated\n )\n msg = dict(\n Header=dict(msg_type=15, comm_type=1, reply_type=0),\n body=dict(robot_id=0,valid_fields=0x02,time=0,\n positions=[joint_1, joint_2, joint_3, joint_4, joint_5, joint_6,0.0,0.0,0.0,0.0],\n velocities=[joint_1, joint_2, joint_3, joint_4, joint_5, joint_6,0.0,0.0,0.0,0.0],\n accelerations=[joint_1, joint_2, joint_3, joint_4, joint_5, joint_6,0.0,0.0,0.0,0.0]),\n )\n feedback_data = SimpleMessage.build(msg)\n #print(feedback_data)\n data_len = c2.Int32sl.build(len(feedback_data))\n print('sending')\n self.transport.write(data_len + feedback_data)\n\nclass feedbackfactory(Factory):\n protocol = feedbackPublisher\n\n\n\nstartLogging(stdout)\nendpoint=TCP4ServerEndpoint(reactor, 11002)\nendpoint.listen(feedbackfactory())\nreactor.run()\n","sub_path":"simple_message/feedback_producer.py","file_name":"feedback_producer.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"437037176","text":"# Copyright 2017 Verily Life Sciences Inc. 
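
The feedback producer above frames every message as a little-endian signed 32-bit length followed by the construct-built payload (`c2.Int32sl.build(len(feedback_data)) + feedback_data`). A stdlib-only sketch of the same framing, where `'<i'` is struct's equivalent of construct's `Int32sl`:

```python
import struct

def frame(payload: bytes) -> bytes:
    # 4-byte little-endian signed length prefix, then the payload
    return struct.pack('<i', len(payload)) + payload

def unframe(buf: bytes) -> bytes:
    (length,) = struct.unpack_from('<i', buf, 0)
    return buf[4:4 + length]

msg = b'joint feedback payload'
assert unframe(frame(msg)) == msg
```
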
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"Import variant data in a VCF file to a BigQuery variants table.\n\nExample usage:\n\npython import_vcf_to_bigquery.py \\\n --source-vcf \"gs://BUCKET_NAME/PATH/TO/variants.vcf.gz\" \\\n --project \"PROJECT_ID\" \\\n --dataset \"DATASET_NAME\" \\\n --variantset \"VARIANTSET_NAME\" \\\n --destination-table \"PROJECT_ID.DATASET_NAME.TABLE_NAME\" \\\n --expand-wildcards\n\"\"\"\n\nimport argparse\nimport logging\n\nimport vcf_to_bigquery_utils\n\n\ndef _parse_arguments():\n \"\"\"Parses command line arguments.\n\n Returns:\n A Namespace of parsed arguments.\n \"\"\"\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n \"--source-vcf\",\n nargs=\"+\",\n required=True,\n help=(\"Cloud Storage path[s] to [gzip-compressed] VCF file[s],\"\n \" wildcards accepted (* but not **).\"))\n parser.add_argument(\n \"--project\",\n required=True,\n help=\"Cloud project for imported Google Genomics data.\")\n parser.add_argument(\n \"--dataset\",\n required=True,\n help=(\"Google Genomics dataset name or id\"\n \" (existing datasets will be appended).\"))\n parser.add_argument(\n \"--variantset\",\n required=True,\n help=(\"Google Genomics variant set name or id\"\n \" (existing targets will be appended).\"))\n parser.add_argument(\n \"--new-dataset\",\n action=\"store_true\",\n help=\"Create a new dataset, even if one with this name exists.\")\n parser.add_argument(\n \"--new-variantset\",\n action=\"store_true\",\n help=\"Create a new variant set, even if one with this name exists.\")\n parser.add_argument(\n \"--expand-wildcards\",\n action=\"store_true\",\n help=\"Expand wildcards in VCF paths and use parallel imports.\")\n parser.add_argument(\n \"--destination-table\",\n required=True,\n help=\"Full path to destination BigQuery table \"\n \"(PROJECT_ID.DATASET_NAME.TABLE_NAME).\")\n parser.add_argument(\n \"--description\",\n help=\"Description for destination BigQuery table.\")\n\n return parser.parse_args()\n\n\ndef main():\n args = _parse_arguments()\n logging.basicConfig(level=logging.INFO)\n\n uploader = vcf_to_bigquery_utils.VcfUploader(args.project)\n uploader.upload_variants(dataset=args.dataset,\n variantset=args.variantset,\n source_vcfs=args.source_vcf,\n destination_table=args.destination_table,\n expand_wildcards=args.expand_wildcards,\n new_dataset=args.new_dataset,\n new_variantset=args.new_variantset,\n description=args.description)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"curation/tables/import_vcf_to_bigquery.py","file_name":"import_vcf_to_bigquery.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"354406770","text":"from datetime import datetime\r\n\r\nbirthday = input(\"Insert Your Birthday Party In This Format (eg.: YYYY MM DD): \")\r\nspliting = birthday.split(\" \")\r\n\r\nprint(spliting)\r\n\r\nday = 
int(spliting[2])\r\nmonth = int(spliting[1])\r\nyear = int(spliting[0])\r\n\r\nleft = datetime(year, month, day) - datetime.now()\r\n \r\nprint(f\"Time left from your birthday is : {left.days} days.\")\r\n\r\n\r\n\r\n","sub_path":"Birthday.py","file_name":"Birthday.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"73714473","text":"#!/usr/bin/env python\n\n\n############################################################################\n#\n# MODULE: \tr.accumulate.erdep.py\n# AUTHOR(S):\t\tIsaac Ullah, Arizona State University\n# PURPOSE:\t\tTakes the output \"netchange\" maps and adds them chronologically\n#\t\t\tto make a series showing the \"to-date\" net change for each year. Good for animations.\n# ACKNOWLEDGEMENTS:\tNational Science Foundation Grant #BCS0410269 \n# COPYRIGHT:\t\t(C) 2007 by Isaac Ullah, Michael Barton, Arizona State University\n#\t\t\tThis program is free software under the GNU General Public\n#\t\t\tLicense (>=v2). Read the file COPYING that comes with GRASS\n#\t\t\tfor details.\n#\n#############################################################################\n\n\n#%Module\n#% description: Takes the output \"netchange\" maps and adds them chronologically to make a series showing the \"to-date\" net change for each year. Good for animations.\n#%END\n\n#%option\n#% key: pattern\n#% type: string\n#% gisprompt: old,cell,raster\n#% description: Pattern of first part of file names (prefixes) for map series (leave off #'s, but include any \"_\"'s or \".\"'s between the prefix and #)\n#% required : yes\n#%END\n#%option\n#% key: startnum\n#% type: integer\n#% description: Smallest number of the input map series (ie. # of the first map you'd like to include in the cumulative series).\n#% required : yes\n#%END\n#%option\n#% key: endnum\n#% type: integer\n#% description: Largest number of the input map series (ie. # of the last map you'd like to include in the cumulative series).\n#% required : yes\n#%END\n#%option\n#% key: digits\n#% type: integer\n#% description: Total number of digits for input (and output) numbers. (for padding with leading zeros. 
if zero, numbers are given no leading zeroes)\n#% required : yes\n#% answer: 0\n#%END\n#%option\n#% key: infix\n#% type: string\n#% description: Infix inserted between the prefix and number of the output maps\n#% answer: _cumseries_\n#% required : yes\n#%END\n#%option\n#% key: suffix\n#% type: string\n#% gisprompt: old,cell,raster\n#% description: Pattern of last part of file names (suffixes) for map series with infixed numbers (leave blank if numbers are on the end of the file name!!!)\n#% required : no\n#%END\n#%option\n#% key: statsout\n#% type: string\n#% description: Name of text file to write yearly cumulative erosion/deposition stats to (if blank, no stats file will be written)\n#% answer: erdep_stats.csv\n#% required : no\n#%END\n#%Flag\n#% key: e\n#% description: -e export all maps to PNG files in home directory (good for animation in other programs)\n#%End\n\nimport sys\nimport os\nimport tempfile\nimport subprocess\ngrass_install_tree = os.getenv('GISBASE')\nsys.path.append(grass_install_tree + os.sep + 'etc' + os.sep + 'python')\nimport grass.script as grass\n\ndef main():\n pattern = os.getenv(\"GIS_OPT_pattern\")\n startnum = int(os.getenv(\"GIS_OPT_startnum\"))\n endnum = int(os.getenv(\"GIS_OPT_endnum\"))\n infix = os.getenv(\"GIS_OPT_infix\")\n digits = int(os.getenv(\"GIS_OPT_digits\"))\n #create temp file for color rules\n temp = tempfile.NamedTemporaryFile()\n temp.write(\"100% 0 0 100\\n1 blue\\n0.5 indigo\\n0.01 green\\n0 white\\n-0.01 yellow\\n-0.5 orange\\n-1 red\\n0% 150 0 50\")\n temp.flush()\n #test to see if we make a stats file, and make it if true\n if bool(os.getenv(\"GIS_OPT_statsout\")) == True:\n statsfile = file(os.getenv(\"GIS_OPT_statsout\"), 'w')\n statsfile.write('Erosion Stats,,,,,,Deposition Stats,,,,,\\nMax,Min,Mean,Standard Deviation,99th percentile,,Max,Min,Mean,Standard Deviation,99th percentile\\n')\n #if clause tests if numbers are inixed, and then runs the loop accordingly\n if bool(os.getenv(\"GIS_OPT_suffix\")) == False:\n grass.message(\"Numbers are suffixes to prefix: \" + pattern)\n for x in range((startnum - 1), endnum):\n tempmap = \"temp_cum_netchange_before_smoothing_%s\" % (x + 1)\n if (x + 1) == startnum:\n outmap = \"%s%s%s\" % (pattern, infix, str(startnum).zfill(digits))\n grass.run_command('g.copy', quiet = True, rast = '%s%s,%s' % (pattern, str(startnum).zfill(digits), outmap))\n grass.run_command('r.colors', quiet = True, map = outmap, rules = temp.name)\n else:\n mapone = \"%s%s%s\" % (pattern, infix, str(x).zfill(digits))\n maptwo = \"%s%s\" % (pattern, str(x + 1).zfill(digits))\n outmap = \"%s%s%s\" % (pattern, infix, str(x + 1).zfill(digits))\n grass.message('doing mapcalc statement for cum netchange map of year %s' % (str(x + 1).zfill(digits)))\n grass.mapcalc('${out}=if(abs(${map1} + ${map2}) < 20, ${map1} + ${map2}, 20) ', out = tempmap, map1 = mapone, map2=maptwo)\n grass.run_command('r.neighbors', quiet = True, input = tempmap, output = outmap, method = 'mode', size = '5')\n grass.message('setting colors for statement for map ' + outmap)\n grass.run_command('r.colors', quiet = True, map = outmap, rules = temp.name)\n if ( os.getenv(\"GIS_FLAG_e\") == \"1\" ):\n grass.message('creating png image of map ' + outmap)\n grass.run_command('r.out.png', quiet = True, input = outmap, output = outmap + '.png')\n if bool(os.getenv(\"GIS_OPT_statsout\")) == True:\n grass.message('calculating erosion/deposition statistics for map ' + outmap)\n grass.mapcalc('temperosion=if(${map1} < 0 && ${map1} > -20, ${map1}, null())', map1 = 
outmap)\n grass.mapcalc('tempdep=if(${map1} > 0 && ${map1} < 20, ${map1}, null())', map1 = outmap)\n dict1 = grass.parse_command('r.univar', flags = 'ge', map = 'temperosion', percentile = '1')\n dict2 = grass.parse_command('r.univar', flags = 'ge', map = 'tempdep', percentile = '99')\n grass.run_command('g.remove', quiet = True, flags = 'f', rast = 'temperosion,tempdep,' + tempmap)\n statsfile.write('%s,%s,%s,%s,%s,,%s,%s,%s,%s,%s\\n' % (dict1['max'], dict1['min'], dict1['mean'], dict1['stddev'], dict1['percentile_1'], dict2['max'], dict2['min'], dict2['mean'], dict2['stddev'], dict2['percentile_99']))\n else:\n suffix = os.getenv(\"GIS_OPT_suffix\")\n grass.message(\"Numbers are infixes between prefix: %s and suffix: %s\" % (pattern, suffix))\n for x in range((startnum - 1), endnum):\n tempmap = \"temp_cum_netchange_before_smoothing_%s\" % (x + 1)\n if (x + 1) == startnum:\n outmap = \"%s%s%s%s\" % (pattern, str(startnum).zfill(digits), infix, suffix)\n grass.run_command('r.neighbors', input = '%s%s%s' % (pattern, str(startnum).zfill(digits), suffix), output = outmap, method = 'mode', size = '5')\n #grass.run_command('g.copy', quiet = True, rast = '%s%s%s,%s' % (pattern, str(startnum).zfill(digits), suffix, outmap))\n grass.run_command('r.colors', quiet = True, map = outmap, rules = temp.name)\n else:\n mapone = \"%s%s%s%s\" % (pattern, str(x).zfill(digits), infix, suffix)\n maptwo = \"%s%s%s\" % (pattern, str(x + 1).zfill(digits), suffix)\n outmap = \"%s%s%s%s\" % (pattern, str(x + 1).zfill(digits), infix, suffix)\n grass.message('doing mapcalc statement for cum netchange map of year %s' % (str(x + 1).zfill(digits)))\n grass.run_command('r.neighbors', input = maptwo, output = tempmap, method = 'mode', size = '5')\n grass.mapcalc('${out}=${map1} + ${map2}', out = outmap, map1 = mapone, map2=tempmap)\n grass.message('setting colors for statement for map %s' % outmap)\n grass.run_command('r.colors', quiet = True, map = outmap, rules = temp.name)\n if ( os.getenv(\"GIS_FLAG_e\") == \"1\" ):\n grass.message('creating png image of map ' + outmap)\n grass.run_command('r.out.png', quiet = True, input = outmap, output = outmap + '.png')\n if bool(os.getenv(\"GIS_OPT_statsout\")) == True:\n grass.message('calculating erosion/deposition statistics for map ' + outmap)\n grass.mapcalc('temperosion=if(${map1} < -0, ${map1}, null())', map1 = outmap)\n grass.mapcalc('tempdep=if(${map1} > 0, ${map1}, null())', map1 = outmap)\n dict1 = grass.parse_command('r.univar', flags = 'ge', map = 'temperosion', percentile = '1')\n dict2 = grass.parse_command('r.univar', flags = 'ge', map = 'tempdep', percentile = '99')\n grass.run_command('g.remove', quiet = True, flags = 'f', rast = 'temperosion,tempdep,' + tempmap)\n statsfile.write('%s,%s,%s,%s,%s,,%s,%s,%s,%s,%s\\n' % (dict1['max'], dict1['min'], dict1['mean'], dict1['stddev'], dict1['percentile_1'], dict2['max'], dict2['min'], dict2['mean'], dict2['stddev'], dict2['percentile_99']))\n if bool(os.getenv(\"GIS_OPT_statsout\")) == True:\n statsfile.close()\n temp.close()\n return\n \nif __name__ == \"__main__\":\n if ( len(sys.argv) <= 1 or sys.argv[1] != \"@ARGS_PARSED@\" ):\n os.execvp(\"g.parser\", [sys.argv[0]] + sys.argv)\n else:\n grass.message(\" Starting the process--hold on!\")\n grass.message(\"It is not done until you see DONE WITH EVERYTHING!\")\n main();\n grass.message(\"DONE WITH 
EVERYTHING!\")\n","sub_path":"grass6/raster/LandDyn/r.accumulate.erdep.py/r.accumulate.erdep.py","file_name":"r.accumulate.erdep.py","file_ext":"py","file_size_in_byte":9388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"29925397","text":"from __future__ import print_function\nimport hexchat\nimport random\n\n__module_name__ = 'Roll'\n__module_version__ = '1.0'\n__module_description__ = 'Rolls a dice'\n__author__ = 'Some random caffeine drinker'\n\n\ndef roll_cb(word, word_eol, userdata):\n if len(word) > 1:\n random.seed()\n if unicode(word[1], 'utf-8').isnumeric():\n print(\"Rolling from 1 to \" + str(word[1]))\n intrandom = random.randint(1, int(word[1]))\n hexchat.command('me rolled {}'.format(intrandom))\n else:\n print('Specify a number. (roll )')\n print('try /help roll')\n else:\n hexchat.command('help roll')\n return hexchat.EAT_ALL\n\n\ndef unload_cb(userdata):\n print(__module_name__, 'version', __module_version__, 'unloaded.')\n\nhexchat.hook_command('roll', roll_cb, help='roll ')\nhexchat.hook_unload(unload_cb)\nprint(__module_name__, 'version', __module_version__, 'loaded.')\n","sub_path":"roll.py","file_name":"roll.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"577662395","text":"class Solution:\n \n def powerset(self, nums, value, result):\n if nums:\n for idx, i in enumerate(nums):\n value.append(i)\n result.append(value[:])\n self.powerset(nums[idx+1:], value, result)\n value.pop()\n \n def subsets(self, nums: List[int]) -> List[List[int]]:\n result = [[]]\n \n self.powerset(nums,[], result)\n return result\n\t\t\n","sub_path":"problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"184661656","text":"from typing import Dict, List, Union\n\nfrom belvo.resources.base import Resource\n\n\nclass TaxStatus(Resource):\n endpoint = \"/api/tax-status/\"\n\n def create(\n self,\n link: str,\n *,\n attach_pdf: bool = False,\n save_data: bool = True,\n raise_exception: bool = False,\n **kwargs: Dict,\n ) -> Union[List[Dict], Dict]:\n\n data = {\"link\": link, \"attach_pdf\": attach_pdf, \"save_data\": save_data}\n\n return self.session.post(\n self.endpoint, data=data, raise_exception=raise_exception, **kwargs\n )\n\n def resume(\n self,\n session: str,\n token: str,\n *,\n link: str = None,\n raise_exception: bool = False,\n **kwargs: Dict,\n ) -> Dict:\n raise NotImplementedError()\n","sub_path":"belvo/resources/tax_status.py","file_name":"tax_status.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"326645036","text":"for _ in range(int(input())):\n n = int(input())\n a = [int(i) for i in input().split()]\n\n b = []\n a1 = []\n sub_a = []\n max_a = None\n\n for i in range(len(a)):\n if a[i] > 0:\n b.append(True)\n else:\n b.append(False)\n\n for i in range(len(a)):\n if b[i] is True:\n a1.append(a[i])\n if i + 1 == n or b[i + 1] is False:\n max_a = max(a1)\n sub_a.append(max_a)\n a1 = []\n\n if b[i] is False:\n a1.append(a[i])\n if i + 1 == n or b[i + 1] is True:\n max_a = max(a1)\n sub_a.append(max_a)\n a1 = []\n\n 
print(sum(sub_a))\n","sub_path":"subrray.py","file_name":"subrray.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"472827651","text":"from unittest import TestCase\nfrom midterm_review import q_25\nfrom unittest.mock import patch\nfrom random import seed\nimport io\n\n\nclass TestQ25(TestCase):\n @patch('sys.stdout', new_callable=io.StringIO)\n def test_q_25_with_random_seed(self, mock_stdout):\n seed(0)\n q_25()\n expected_output = \"1 166774\\n\" \\\n \"2 166865\\n\" \\\n \"3 166709\\n\" \\\n \"4 166657\\n\" \\\n \"5 166695\\n\" \\\n \"6 166300\\n\"\n self.assertEqual(expected_output, mock_stdout.getvalue())\n","sub_path":"1510_201910_lab_06/test_q_25.py","file_name":"test_q_25.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"120466268","text":"import model\n\n\nclass MyStrategy:\n def __init__(self):\n pass\n\n def get_action(self, unit, game, debug):\n def distance_sqr(a, b):\n return (a.x - b.x) ** 2 + (a.y - b.y) ** 2\n\n # Find the nearest enemy unit\n nearest_enemy = min(\n filter(lambda u: u.player_id != unit.player_id, game.units),\n key=lambda u: distance_sqr(u.position, unit.position),\n default=None)\n\n # Find the best weapon: the nearest one\n best_weapon = min(\n filter(lambda box: isinstance(box.item, model.Item.Weapon),\n game.loot_boxes),\n key=lambda box: distance_sqr(box.position, unit.position),\n default=None)\n\n # Calculate the position in the next tick\n if unit.weapon is None and best_weapon is not None:\n target_pos = best_weapon.position\n elif nearest_enemy is not None:\n target_pos = nearest_enemy.position\n else:\n target_pos = unit.position\n\n '''\n if unit.weapon is None and best_weapon is not None:\n target_pos = best_weapon.position\n elif nearest_enemy is not None:\n target_pos = nearest_enemy.position\n '''\n\n debug.draw(model.CustomData.Log(\"Target pos: {}\".format(target_pos)))\n\n # Calculate the aiming direction\n aim = model.Vec2Double(0, 0)\n if nearest_enemy is not None:\n aim = model.Vec2Double(\n nearest_enemy.position.x - unit.position.x,\n nearest_enemy.position.y - unit.position.y)\n\n # Decide whether the unit should jump\n jump = target_pos.y > unit.position.y\n if target_pos.x > unit.position.x and game.level.tiles[int(unit.position.x + 1)][int(unit.position.y)] == model.Tile.WALL:\n jump = True\n if target_pos.x < unit.position.x and game.level.tiles[int(unit.position.x - 1)][int(unit.position.y)] == model.Tile.WALL:\n jump = True\n\n # Decide whether the unit should shoot\n vector_len = (aim.x ** 2 + aim.y ** 2) ** 0.5\n unit_aim = model.Vec2Double(aim.x / vector_len, aim.y / vector_len)\n\n start_x = min(unit.position.x, nearest_enemy.position.x)\n end_x = max(unit.position.x, nearest_enemy.position.x)\n start_y = min(unit.position.y, nearest_enemy.position.y)\n end_y = max(unit.position.y, nearest_enemy.position.y)\n shoot = True\n\n cur_x, cur_y = unit.position.x, unit.position.y\n while start_x <= cur_x <= end_x and start_y <= cur_y <= end_y:\n if game.level.tiles[int(cur_x)][int(cur_y)] != model.Tile.EMPTY:\n shoot = False\n break\n else:\n cur_x += unit_aim.x\n cur_y += unit_aim.y\n\n unit_action = model.UnitAction(\n velocity=target_pos.x - unit.position.x,\n jump=jump,\n jump_down=not jump,\n aim=aim,\n shoot=shoot,\n reload=False,\n swap_weapon=False,\n plant_mine=False)\n\n return 
unit_action\n","sub_path":"aicup-python/my_strategy.py","file_name":"my_strategy.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"135404055","text":"# coding: utf-8\n# Тест формы для поиска по фильмам на главной странице кинопоиска (удачный и неудачный)\n# pkuzmichev 30.05.2016\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\n\n\ndriver = webdriver.Firefox()\ndriver.implicitly_wait(30)\ndriver.maximize_window()\ndriver.get(\"http://www.kinopoisk.ru/\")\n\nsearch_area = driver.find_element_by_id(\"search_input\")\nsearch_area.send_keys(\"sdifasugfsafhusaifha\")\nsearch_area.send_keys(Keys.RETURN)\n# fail\nsearch_area = driver.find_element_by_css_selector(\"#search_input\")\nsearch_area.clear()\nsearch_area.send_keys(\"Finding Dory\")\nsearch_area.send_keys(Keys.RETURN)\n\ndriver.quit()","sub_path":"kinopoisk/test_search_films.py","file_name":"test_search_films.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"103264841","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom lxml import etree\nfrom amazon.items import AmazonItem\nimport requests\nfrom fake_useragent import UserAgent\nimport re, datetime\n\n\nclass AmazonspiderSpider(scrapy.Spider):\n name = 'amazonSpider'\n allowed_domains = ['www.amazon.com']\n keyword='earbuds'\n start_urls = [f'https://www.amazon.com/s?k=earbuds&page={page}' for page in range(1, 51)]\n headers = {'user-agent': UserAgent().chrome,'referer': 'https://www.amazon.com/'}\n\n # start\n def start_requests(self):\n for url in self.start_urls:\n yield scrapy.Request(url, headers=self.headers, callback=self.parse)\n\n # html\n def parse(self, response):\n print(f'{response.url} 请求成功')\n retry_num = 1\n html = response.text\n\n try:\n if 'Enter the characters you see below' in html:\n print('需输入验证码:{}'.format(response.url))\n raise requests.exceptions.RequestException\n except Exception as e:\n print(e)\n if response.status == 404:\n print('404错误:{}'.format(response.url))\n\n if retry_num <= 10:\n print(f'{response.url}重试第{retry_num}次')\n self.retry(url=response.url)\n retry_num += 1\n\n else:\n print(f'重试失败{response.url}')\n html = ''\n return self.parse_page(html=html)\n\n def retry(self, url):\n\n yield scrapy.Request(\n url=url,\n headers=self.headers,\n callback=self.parse\n )\n\n # get asin\n def parse_page(self,html):\n if html == '':\n print('页面获取失败')\n else:\n print('获取页面成功')\n mytree = etree.HTML(html)\n asin_box = mytree.xpath('//*[@id=\"search\"]/div[1]/div[2]/div/span[3]/div[1]/div[contains(@data-asin,\"B\")]/@data-asin')\n\n for asin in asin_box:\n url = 'https://www.amazon.com/dp/' + asin\n\n try:\n yield scrapy.Request(url=url, callback=self.get_detail, headers=self.headers)\n except Exception as e:\n print(e)\n self.retry_asin_url(url=url)\n\n def retry_asin_url(self,url):\n retry_num = 1\n try:\n print(f'{url}回调重试第{retry_num}次')\n yield scrapy.Request(url=url, callback=self.get_detail, headers=self.headers)\n except Exception as e:\n print(e)\n\n if retry_num <= 11:\n self.retry_asin_url(url)\n print(f'详情页{url}回调出错重试第{retry_num}次')\n retry_num += 1\n else:\n print(f'{url}详情页获取失败')\n\n def get_detail(self,res):\n\n print(f'{res.url} 请求成功')\n\n retry_num = 1\n html_text = res.text\n\n try:\n if 'Enter the characters you see below' in html_text:\n print('需输入验证码:{}'.format(res.url))\n raise 
requests.exceptions.RequestException\n except Exception as e:\n print(e)\n if res.status == 404:\n print('404错误:{}'.format(res.url))\n\n if retry_num <= 10:\n self.retry_asin_url(url=res.url)\n print(f'{res.url}重试第{retry_num}次')\n retry_num += 1\n else:\n print(f'爬取失败{res.url}')\n\n print(f'解析详情页面{res.url}')\n\n item = {}\n keyword = self.keyword\n item['keyword'] = keyword\n url = res.url\n item['url'] = url\n item['asin'] = url.split('/')[-1]\n html = etree.HTML(html_text)\n\n # title\n try:\n item['title'] = html.xpath('//span[@id=\"productTitle\"]/text()')[0].strip().replace('\\xa0', ' ')\n except Exception as e:\n item['title'] = ''\n\n # brand\n if html.xpath('//a[@id=\"bylineInfo\"]/text()') and html.xpath('//a[@id=\"bylineInfo\"]/text()'):\n item['brand'] = html.xpath('//a[@id=\"bylineInfo\"]/text()')[0].replace('by','').strip()\n elif html.xpath('//a[contains(@class,\"contributorNameID\")]/text()'):\n item['brand'] = html.xpath('//a[contains(@class,\"contributorNameID\")]/text()')[0].strip()\n else:\n item['brand'] = ''\n\n # price\n if html.xpath('//span[@id=\"priceblock_ourprice\"]/text()'):\n price = html.xpath('//span[@id=\"priceblock_ourprice\"]/text()')[0]\n item['price'] = price[1:]\n else:\n item['price'] = ''\n\n # main pic\n if html.xpath('//div[@id=\"imgTagWrapperId\"]/img/@data-old-hires'):\n if html.xpath('//div[@id=\"imgTagWrapperId\"]/img/@data-old-hires')[0]:\n item['picture'] = html.xpath('//div[@id=\"imgTagWrapperId\"]/img/@data-old-hires')[0]\n else:\n item['picture'] = re.findall('\"(https.*?\\.jpg)\"',html.xpath('//div[@id=\"imgTagWrapperId\"]/img/@data-a-dynamic-image')[0])[0]\n elif re.findall('\"mainUrl\":\"(https.*?\\.jpg)\"', html_text):\n item['picture'] = re.findall('\"mainUrl\":\"(https.*?\\.jpg)\"', html_text)[0]\n elif html.xpath('//div[@id=\"digitalMusicProductImage_feature_div\"]/img/@src'):\n item['picture'] = html.xpath('//div[@id=\"digitalMusicProductImage_feature_div\"]/img/@src')[0]\n else:\n item['picture'] = ''\n item['picture'] = re.sub('\\._[A-Z]{2}\\d{4}_', '', item['picture'])\n\n # stars\n try:\n item['stars'] = float(html.xpath('//span[@id=\"acrPopover\"]/@title')[0].split(' ')[0])\n except:\n item['stars'] = 0.0\n\n # reviews\n try:\n item['reviews'] = int(html.xpath('//span[@id=\"acrCustomerReviewText\"]/text()')[0].split(' ')[0].replace(',', ''))\n except:\n item['reviews'] = 0\n\n # father bsr\n if re.findall('#(.*?) in (.*?) \\(.*?See Top 100 in.*?\\)', html_text):\n item['rank'] = re.findall('#(.*?) in (.*?) \\(.*?See Top 100 in.*?\\)', html_text)[0]\n else:\n item['rank'] = ''\n\n item['ranks'] = []\n\n # son bsr\n if html.xpath('//li[@class=\"zg_hrsr_item\"]'):\n ranks_no = html.xpath('//li[@class=\"zg_hrsr_item\"]/span[@class=\"zg_hrsr_rank\"]/text()')\n ranks_name = html.xpath('//li[@class=\"zg_hrsr_item\"]/span[@class=\"zg_hrsr_ladder\"]/a/text()')\n item['ranks'] = [(no.replace('#', ''), name) for no, name in zip(ranks_no, ranks_name)]\n\n if not item['ranks']:\n item['ranks'] = re.findall(\"#(.*?) 
in (.*?)<\\/a>\", html_text)\n\n item['time'] = datetime.date.today()\n\n yield item\n\n","sub_path":"amazon/spiders/amazonSpider.py","file_name":"amazonSpider.py","file_ext":"py","file_size_in_byte":6761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"383827745","text":"from behave import *\r\nfrom selenium import webdriver\r\nfrom selenium.common.exceptions import ElementNotVisibleException\r\nimport time\r\nimport re\r\n\r\n@given(u'I am on the homepage \"{url}\"')\r\ndef given_url(context, url):\r\n\tcontext.browser.get(url)\r\n\r\n@when(u'I enter text \"{google_search_text}\" and click \"{button_name}\"')\r\ndef google_search(context, google_search_text, button_name):\r\n\tcontext.browser.find_element_by_xpath('.//input[@name=\"q\"]').send_keys(google_search_text)\r\n\ttime.sleep(1)\r\n\tcontext.browser.find_element_by_xpath('.//button[@value=\"' + button_name + '\"]').click()\r\n\ttime.sleep(1)\r\n\r\n@when(u'I click on the \"{partial_link_text}\" link')\r\ndef click_partial_link_text(context, partial_link_text):\r\n\ttime.sleep(2)\r\n\tlink = context.browser.find_element_by_partial_link_text(partial_link_text)\r\n\ttry:\r\n\t\t\tlink.click()\r\n\texcept ElementNotVisibleException:\r\n\t\tcontext.browser.execute_script(\"arguments[0].scrollIntoView();\", link)\r\n\t\tlink.click()\r\n\ttime.sleep(1)\r\n\r\n@then(u'I am brought to website \"{url}\"')\r\ndef current_page(context, url):\r\n\tassert context.browser.current_url == url\r\n\r\n@then(u\"the page's headline is \\\"{headline}\\\"\")\r\ndef headline_text(context, headline):\r\n\tassert context.browser.find_element_by_xpath('//h1').text == headline\r\n\r\ndef ignore_line_breaks(text):\r\n\tnew_text = re.sub(r'\\n|\\r', ' ', text)\r\n\treturn new_text\r\n\r\n@then(u'I see text \"{seen_text}\"')\r\ndef search_page_text(context, seen_text):\r\n\tassert ignore_line_breaks(seen_text) in ignore_line_breaks(context.browser.find_element_by_xpath('//body').text)\r\n\r\n@then(u'I see text')\r\ndef search_page_pystring(context):\r\n\tsearch_page_text(context, context.text)\r\n\r\n@then(u'I see the following leaders')\r\ndef tap_leaders(context):\r\n\tleader_xpath = '//div[@class=\"leadership\"]//h2'\r\n\tseen_leaders = [x.text for x in context.browser.find_elements_by_xpath(leader_xpath)]\r\n\texpected_leaders = context.text.split('\\n')\r\n\tassert len(seen_leaders) == len(expected_leaders)\r\n\tfor leader in expected_leaders:\r\n\t\tassert ignore_line_breaks(leader) in seen_leaders\r\n\r\ndef get_labeled_input(context, label):\r\n\tlabel = context.browser.find_element_by_xpath('//label[text()=\"' + label + '\"]')\r\n\tlabeled_input = context.browser.find_element_by_id(label.get_attribute('for'))\r\n\treturn labeled_input\r\n\r\n@then(u'I can fill out the form')\r\ndef tap_contact_form(context):\r\n\tfor row in context.table:\r\n\t\tinput_field = get_labeled_input(context, row['key'])\r\n\t\ttry:\r\n\t\t\tinput_field.send_keys(row['value'])\r\n\t\texcept ElementNotVisibleException:\r\n\t\t\tcontext.browser.execute_script(\"arguments[0].scrollIntoView();\", input_field)\r\n\t\t\tinput_field.send_keys(row['value'])","sub_path":"Features/Steps/steps.py","file_name":"steps.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"8971527","text":"# ABC151C\nN, M = map(int, input().split())\nP = [0] * M\nS = [\"\"] * M\n\ncheck_AC = [False] * N\nfor i in range(M):\n p, s = map(str, 
input().split())\n p = int(p)\n if s == \"AC\":\n check_AC[p-1] = True\n P[i] = p\n S[i] = s\n\nans_AC = 0\nans_WA = 0\nfor i in range(M):\n if check_AC[P[i]-1]:\n if S[i] == \"AC\":\n ans_AC += 1\n check_AC[P[i] - 1] = False\n else:\n ans_WA += 1\n else:\n continue\nprint(ans_AC, ans_WA)\n","sub_path":"AtCoder_Python/ABC/ABC151/ABC_151_C.py","file_name":"ABC_151_C.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"177102563","text":"from flask import Blueprint, request, render_template, abort\nimport time\nfrom ..Common import pexelScraper\nfrom ..Common import nav\nimport json\n\nnav.register(\"projects\",\"DIC\",\"/dic\")\ndic = Blueprint('dic', __name__, template_folder='templates', static_folder='static')\n\n@dic.route('/')\ndef index():\n\tnavBar = nav.getNav().render(\"projects\",'DIC')\n\tif \"top\" in request.args and \"bot\" in request.args:\n\t\ttop = request.args[\"top\"]\n\t\tbot = request.args[\"bot\"]\n\telse:\n\t\ttop = \"CLICK ME\"\n\t\tbot = \"EDIT ME\"\n\treturn render_template('DIC.html',top=top,bot=bot,navBar=navBar,t=str(time.time()))\n\n@dic.route('/imgRequest')\ndef imgRequest():\n\ttry:\n\t\tw = int(request.args['w'])\n\t\th = int(request.args['h'])\n\texcept (KeyError, ValueError):\n\t\tabort(404)\n\timg = pexelScraper.simpleSearch(w,h)\n\tif img != None:\n\t\treturn json.dumps(img)\n\telse:\n\t\tabort(404)","sub_path":"pages/DIC_Page/DIC.py","file_name":"DIC.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"441363447","text":"from warnings import warn\n\nimport numpy as np\nfrom copy import copy\n\nimport vispy.color\n\nfrom .._base_layer import Layer\nfrom ..._vispy.scene.visuals import Image as ImageNode\n\nfrom ...util import is_multichannel\nfrom ...util.interpolation import (interpolation_names,\n interpolation_index_to_name as _index_to_name, # noqa\n interpolation_name_to_index as _name_to_index) # noqa\nfrom ...util.misc import guess_metadata\nfrom ...util.colormaps import matplotlib_colormaps\nfrom ...util.colormaps.vendored import cm\nfrom ...util.event import Event\n\nfrom .._register import add_to_viewer\n\nfrom .view import QtImageLayer\nfrom .view import QtImageControls\n\n\ndef _increment_unnamed_colormap(name, names):\n if name == '[unnamed colormap]':\n past_names = [n for n in names\n if n.startswith('[unnamed colormap')]\n name = f'[unnamed colormap {len(past_names)}]'\n return name\n\n\ndef vispy_or_mpl_colormap(name):\n \"\"\"Try to get a colormap from vispy, or convert an mpl one to vispy format.\n\n Parameters\n ----------\n name : str\n The name of the colormap.\n\n Returns\n -------\n cmap : vispy.color.Colormap\n The found colormap.\n\n Raises\n ------\n KeyError\n If no colormap with that name is found within vispy or matplotlib.\n \"\"\"\n vispy_cmaps = vispy.color.get_colormaps()\n if name in vispy_cmaps:\n cmap = vispy.color.get_colormap(name)\n else:\n try:\n mpl_cmap = getattr(cm, name)\n except AttributeError:\n raise KeyError(f'Colormap \"{name}\" not found in either vispy '\n 'or matplotlib.')\n mpl_colors = mpl_cmap(np.linspace(0, 1, 256))\n cmap = vispy.color.Colormap(mpl_colors)\n return cmap\n\n\nAVAILABLE_COLORMAPS = {k: vispy_or_mpl_colormap(k)\n for k in matplotlib_colormaps +\n list(vispy.color.get_colormaps())}\n\n\n@add_to_viewer\nclass Image(Layer):\n \"\"\"Image layer.\n\n Parameters\n ----------\n image : np.ndarray\n Image data.\n meta : dict, 
optional\n Image metadata.\n multichannel : bool, optional\n Whether the image is multichannel. Guesses if None.\n name : str, keyword-only\n Name of the layer.\n clim_range : list | array | None\n Length two list or array with the default color limit range for the\n image. If not passed will be calculated as the min and max of the\n image. Passing a value prevents this calculation which can be useful\n when working with very large datasets that are dynamically loaded.\n **kwargs : dict\n Parameters that will be translated to metadata.\n \"\"\"\n _colormaps = AVAILABLE_COLORMAPS\n\n default_colormap = 'magma'\n default_interpolation = 'nearest'\n\n def __init__(self, image, meta=None, multichannel=None, *, name=None,\n clim_range=None, **kwargs):\n if name is None and meta is not None:\n if 'name' in meta:\n name = meta['name']\n\n visual = ImageNode(None, method='auto')\n super().__init__(visual, name)\n\n self.events.add(clim=Event,\n colormap=Event,\n interpolation=Event)\n\n meta = guess_metadata(image, meta, multichannel, kwargs)\n\n self._image = image\n self._meta = meta\n self.colormap_name = Image.default_colormap\n self.colormap = Image.default_colormap\n self.interpolation = Image.default_interpolation\n self._interpolation_names = interpolation_names\n\n # update flags\n self._need_display_update = False\n self._need_visual_update = False\n\n if clim_range is None:\n self._clim_range = self._clim_range_default()\n else:\n self._clim_range = clim_range\n self._node.clim = self._clim_range\n\n cmin, cmax = self.clim\n self._clim_msg = f'{cmin: 0.3}, {cmax: 0.3}'\n\n self._qt_properties = QtImageLayer(self)\n self._qt_controls = QtImageControls(self)\n\n @property\n def image(self):\n \"\"\"np.ndarray: Image data.\n \"\"\"\n return self._image\n\n @image.setter\n def image(self, image):\n self._image = image\n\n self.refresh()\n\n @property\n def meta(self):\n \"\"\"dict: Image metadata.\n \"\"\"\n return self._meta\n\n @meta.setter\n def meta(self, meta):\n self._meta = meta\n\n self.refresh()\n\n @property\n def data(self):\n \"\"\"tuple of np.ndarray, dict: Image data and metadata.\n \"\"\"\n return self.image, self.meta\n\n @data.setter\n def data(self, data):\n self._image, self._meta = data\n self.refresh()\n\n def _get_shape(self):\n if self.multichannel:\n return self.image.shape[:-1]\n return self.image.shape\n\n def _update(self):\n \"\"\"Update the underlying visual.\n \"\"\"\n if self._need_display_update:\n self._need_display_update = False\n\n self._node._need_colortransform_update = True\n self._set_view_slice(self.viewer.dims.indices)\n\n if self._need_visual_update:\n self._need_visual_update = False\n self._node.update()\n\n def _refresh(self):\n \"\"\"Fully refresh the underlying visual.\n \"\"\"\n self._need_display_update = True\n self._update()\n\n def _slice_image(self, indices):\n \"\"\"Determines the slice of image given the indices.\n\n Parameters\n ----------\n indices : sequence of int or slice\n Indices to slice with.\n \"\"\"\n\n ndim = self.ndim\n\n indices = list(indices)\n indices = indices[-ndim:]\n\n for dim in range(len(indices)):\n max_dim_index = self.image.shape[dim] - 1\n\n try:\n if indices[dim] > max_dim_index:\n indices[dim] = max_dim_index\n except TypeError:\n pass\n\n self._image_view = np.asarray(self.image[tuple(indices)])\n\n return self._image_view\n\n def _set_view_slice(self, indices):\n \"\"\"Sets the view given the indices to slice with.\n\n Parameters\n ----------\n indices : sequence of int or slice\n Indices to slice with.\n 
\"\"\"\n sliced_image = self._slice_image(indices)\n\n self._node.set_data(sliced_image)\n\n self._need_visual_update = True\n self._update()\n\n @property\n def multichannel(self):\n \"\"\"bool: Whether the image is multichannel.\n \"\"\"\n return is_multichannel(self.meta)\n\n @multichannel.setter\n def multichannel(self, val):\n if val == self.multichannel:\n return\n\n self.meta['itype'] = 'multi'\n\n self._need_display_update = True\n self._update()\n\n @property\n def interpolation_index(self):\n \"\"\"int: Index of the current interpolation method equipped.\n \"\"\"\n return self._interpolation_index\n\n @interpolation_index.setter\n def interpolation_index(self, interpolation_index):\n intp_index = interpolation_index % len(interpolation_names)\n self._interpolation_index = intp_index\n self._node.interpolation = _index_to_name(intp_index)\n\n @property\n def colormap(self):\n \"\"\"string or ColorMap: Colormap to use for luminance images.\n \"\"\"\n return self.colormap_name, self._node.cmap\n\n @colormap.setter\n def colormap(self, colormap):\n name = '[unnamed colormap]'\n if isinstance(colormap, str):\n name = colormap\n elif isinstance(colormap, tuple):\n name, cmap = colormap\n self._colormaps[name] = cmap\n elif isinstance(colormap, dict):\n self._colormaps.update(colormap)\n name = list(colormap)[0] # first key in dict\n elif isinstance(colormap, vispy.color.Colormap):\n name = _increment_unnamed_colormap(name,\n list(self._colormaps.keys()))\n self._colormaps[name] = colormap\n else:\n warn(f'invalid value for colormap: {colormap}')\n name = self.colormap_name\n self.colormap_name = name\n self._node.cmap = self._colormaps[name]\n self.events.colormap()\n\n @property\n def colormaps(self):\n \"\"\"tuple of str: names of available colormaps.\n \"\"\"\n return tuple(self._colormaps.keys())\n\n # wrap visual properties:\n @property\n def clim(self):\n \"\"\"string or tuple of float: Limits to use for the colormap.\n Can be 'auto' to auto-set bounds to the min and max of the data.\n \"\"\"\n return self._node.clim\n\n @clim.setter\n def clim(self, clim):\n self._clim_msg = f'{clim[0]: 0.3}, {clim[1]: 0.3}'\n self.status = self._clim_msg\n self._node.clim = clim\n self.events.clim()\n\n @property\n def method(self):\n \"\"\"string: Selects method of rendering image in case of non-linear\n transforms. Each method produces similar results, but may trade\n efficiency and accuracy. If the transform is linear, this parameter\n is ignored and a single quad is drawn around the area of the image.\n\n * 'auto': Automatically select 'impostor' if the image is drawn\n with a nonlinear transform; otherwise select 'subdivide'.\n * 'subdivide': ImageVisual is represented as a grid of triangles\n with texture coordinates linearly mapped.\n * 'impostor': ImageVisual is represented as a quad covering the\n entire view, with texture coordinates determined by the\n transform. 
This produces the best transformation results, but may\n be slow.\n \"\"\"\n return self._node.method\n\n @method.setter\n def method(self, method):\n self._node.method = method\n\n @property\n def interpolation(self):\n \"\"\"string: Equipped interpolation method's name.\n \"\"\"\n return _index_to_name(self.interpolation_index)\n\n @interpolation.setter\n def interpolation(self, interpolation):\n self.interpolation_index = _name_to_index(interpolation)\n self.events.interpolation()\n\n @property\n def interpolation_functions(self):\n return tuple(interpolation_names)\n\n def _clim_range_default(self):\n return [float(self.image.min()), float(self.image.max())]\n\n def get_value(self, position, indices):\n \"\"\"Returns coordinates, values, and a string for a given mouse position\n and set of indices.\n\n Parameters\n ----------\n position : sequence of two int\n Position of mouse cursor in canvas.\n indices : sequence of int or slice\n Indices that make up the slice.\n\n Returns\n ----------\n coord : sequence of int\n Position of mouse cursor in data.\n value : int or float or sequence of int or float\n Value of the data at the coord.\n msg : string\n String containing a message that can be used as\n a status update.\n \"\"\"\n transform = self._node.canvas.scene.node_transform(self._node)\n pos = transform.map(position)\n pos = [np.clip(pos[1], 0, self._image_view.shape[0]-1),\n np.clip(pos[0], 0, self._image_view.shape[1]-1)]\n coord = list(indices)\n coord[-2] = int(pos[0])\n coord[-1] = int(pos[1])\n value = self._image_view[tuple(coord[-2:])]\n msg = f'{coord}, {self.name}' + ', value '\n if isinstance(value, np.ndarray):\n if isinstance(value[0], np.integer):\n msg = msg + str(value)\n else:\n v_str = '[' + str.join(', ', [f'{v:0.3}' for v in value]) + ']'\n msg = msg + v_str\n else:\n if isinstance(value, np.integer):\n msg = msg + str(value)\n else:\n msg = msg + f'{value:0.3}'\n return coord, value, msg\n\n def on_mouse_move(self, event):\n \"\"\"Called whenever mouse moves over canvas.\n \"\"\"\n if event.pos is None:\n return\n coord, value, msg = self.get_value(event.pos, self.viewer.dims.indices)\n self.status = msg\n","sub_path":"napari/layers/_image_layer/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":12345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"47508719","text":"#-*- coding: utf-8 -*-\n\n# Testing module for Evaluator\n# - includes print functions / ... 
/ ...\n#\n# ==============================================================================\n\nfrom __future__ import unicode_literals, print_function, division\n\nimport evaluator\nimport numpy as np\n\nfrom sklearn.datasets import make_classification, make_regression\n\ntest_type = 5\n\nout_path = '/Users/songwon/workspace/personal-ml-utils/%s/' % test_type\n\nprint('Test type: %s' % test_type)\n\nif test_type < 2:\n X, y = make_regression(\n n_samples = 500,\n n_features = 10)\nelse:\n X, y = make_classification(\n n_samples = 5000,\n n_features = 15,\n n_informative = 7,\n n_classes = test_type)\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = \\\n train_test_split(X, y, test_size=0.3)\n\nif test_type >= 2:\n from sklearn.preprocessing import LabelEncoder\n le = LabelEncoder().fit(y_train)\n y_train = le.transform(y_train)\n y_test = le.transform(y_test)\n\n\nfrom sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier\n\nif test_type < 2:\n mdl = KNeighborsRegressor().fit(X_train, y_train)\n pred = mdl.predict(X_test)\n ev = evaluator.RegressionEvaluator(name='regression', out_path=out_path, pred=pred, y=y_test)\nelse:\n mdl = KNeighborsClassifier().fit(X_train, y_train)\n prob = mdl.predict_proba(X_test)\n pred = mdl.predict(X_test)\n # ev = evaluator.ClassificationEvaluator('%s_class' %test_type, out_path, prob, pred, y_test, le.classes_)\n ev = evaluator.ClassificationEvaluator(name='%s_class' %test_type, out_path=out_path, prob=prob, y=y_test, classes=le.classes_)\n\n\n##### Test module\n\nif test_type < 2:\n #ev.plot_prediction()\n #ev.plot_prediction(kde=False)\n #ev.plot_residual()\n #ev.plot_residual(kde=False)\n ev.evaluate()\nelse:\n #ev.plot_roc()\n #ev.plot_score_dist()\n #cm = ev.get_confusion_matrix()\n #ev.plot_confusion_matrix(cm)\n #perf = ev.get_performance(cm)\n #print(perf)\n #ev.print_performance(perf)\n ev.evaluate()\n \n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"286108912","text":"\"\"\"clowder.yaml parsing and functionality\"\"\"\nimport os, yaml\nfrom termcolor import colored\nfrom clowder.group import Group\nfrom clowder.source import Source\nfrom clowder.utility.clowder_utilities import (\n forall_run,\n validate_yaml,\n print_exiting\n)\n\nclass ClowderController(object):\n \"\"\"Class encapsulating project information from clowder.yaml for controlling clowder\"\"\"\n def __init__(self, rootDirectory):\n self.root_directory = rootDirectory\n self.default_ref = None\n self.default_remote = None\n self.default_source = None\n self.groups = []\n self.sources = []\n\n self._load_yaml()\n\n def fix_version(self, version):\n \"\"\"Save current commits to a clowder.yaml in the versions directory\"\"\"\n self._validate_projects_exist()\n self._validate(self.get_all_group_names())\n versions_dir = os.path.join(self.root_directory, 'clowder', 'versions')\n version_name = version.replace('/', '-') # Replace path separators with dashes\n version_dir = os.path.join(versions_dir, version_name)\n if not os.path.exists(version_dir):\n os.makedirs(version_dir)\n\n yaml_file = os.path.join(version_dir, 'clowder.yaml')\n yaml_file_output = colored(yaml_file, 'cyan')\n version_output = colored(version_name, attrs=['bold'])\n if not os.path.exists(yaml_file):\n with open(yaml_file, 'w') as file:\n print('Fixing version ' + version_output + ' at ' + yaml_file_output)\n 
yaml.dump(self._get_yaml(), file, default_flow_style=False)\n else:\n print('Version ' + version_output + ' already exists at ' + yaml_file_output)\n print_exiting()\n\n def forall_groups(self, command, group_names):\n \"\"\"Runs command in all project directories of groups specified\"\"\"\n directories = []\n for group in self.groups:\n if group.name in group_names:\n for project in group.projects:\n directories.append(project.full_path())\n forall_run(command, directories)\n\n def forall_projects(self, command, project_names):\n \"\"\"Runs command in all project directories of projects specified\"\"\"\n directories = []\n for group in self.groups:\n for project in group.projects:\n if project.name in project_names:\n directories.append(project.full_path())\n forall_run(command, directories)\n\n def get_all_group_names(self):\n \"\"\"Returns all group names for current clowder.yaml\"\"\"\n return sorted([g.name for g in self.groups])\n\n def get_all_project_names(self):\n \"\"\"Returns all project names for current clowder.yaml\"\"\"\n return sorted([p.name for g in self.groups for p in g.projects])\n\n def get_fixed_version_names(self):\n \"\"\"Return list of all fixed versions\"\"\"\n versions_dir = os.path.join(self.root_directory, 'clowder', 'versions')\n if os.path.exists(versions_dir):\n return os.listdir(versions_dir)\n else:\n return None\n\n def groom_groups(self, group_names):\n \"\"\"Discard changes for projects\"\"\"\n if self._is_dirty():\n for group in self.groups:\n if group.name in group_names:\n group.groom()\n else:\n print('No changes to discard')\n\n def groom_projects(self, project_names):\n \"\"\"Discard changes for projects\"\"\"\n if self._is_dirty():\n for group in self.groups:\n for project in group.projects:\n if project.name in project_names:\n project.groom()\n else:\n print('No changes to discard')\n\n def herd_groups(self, group_names):\n \"\"\"Sync projects with latest upstream changes\"\"\"\n self._validate(group_names)\n for group in self.groups:\n if group.name in group_names:\n group.herd()\n\n def herd_projects(self, project_names):\n \"\"\"Sync projects with latest upstream changes\"\"\"\n self._validate(project_names)\n for group in self.groups:\n for project in group.projects:\n if project.name in project_names:\n project.herd()\n\n def meow(self, group_names):\n \"\"\"Print status for projects\"\"\"\n for group in self.groups:\n if group.name in group_names:\n group.meow()\n\n def meow_verbose(self, group_names):\n \"\"\"Print git status for projects with changes\"\"\"\n for group in self.groups:\n if group.name in group_names:\n group.meow_verbose()\n\n def stash_groups(self, group_names):\n \"\"\"Stash changes for projects with changes\"\"\"\n if self._is_dirty():\n for group in self.groups:\n if group.name in group_names:\n group.stash()\n else:\n print('No changes to stash')\n\n def stash_projects(self, project_names):\n \"\"\"Stash changes for projects with changes\"\"\"\n if self._is_dirty():\n for group in self.groups:\n for project in group.projects:\n if project.name in project_names:\n project.stash()\n else:\n print('No changes to stash')\n\n def _get_yaml(self):\n \"\"\"Return python object representation for saving yaml\"\"\"\n groups_yaml = [g.get_yaml() for g in self.groups]\n sources_yaml = [s.get_yaml() for s in self.sources]\n defaults_yaml = {'ref': self.default_ref,\n 'remote': self.default_remote,\n 'source': self.default_source}\n return {'defaults': defaults_yaml,\n 'sources': sources_yaml,\n 'groups': groups_yaml}\n\n def 
_is_dirty(self):\n \"\"\"Check if there are any dirty projects\"\"\"\n is_dirty = False\n for group in self.groups:\n if group.is_dirty():\n is_dirty = True\n return is_dirty\n\n def _load_yaml(self):\n \"\"\"Load clowder from yaml file\"\"\"\n yaml_file = os.path.join(self.root_directory, 'clowder.yaml')\n if os.path.exists(yaml_file):\n with open(yaml_file) as file:\n parsed_yaml = yaml.safe_load(file)\n validate_yaml(parsed_yaml)\n\n self.default_ref = parsed_yaml['defaults']['ref']\n self.default_remote = parsed_yaml['defaults']['remote']\n self.default_source = parsed_yaml['defaults']['source']\n\n self.sources = [Source(s) for s in parsed_yaml['sources']]\n\n defaults = {'ref': self.default_ref,\n 'remote': self.default_remote,\n 'source': self.default_source}\n\n for group in parsed_yaml['groups']:\n self.groups.append(Group(self.root_directory,\n group,\n defaults,\n self.sources))\n # self.groups.sort(key=lambda group: group.name)\n\n def _validate(self, group_names):\n \"\"\"Validate status of all projects for specified groups\"\"\"\n valid = True\n for group in self.groups:\n if group.name in group_names:\n group.print_validation()\n if not group.is_valid():\n valid = False\n if not valid:\n print_exiting()\n\n def _validate_projects_exist(self):\n \"\"\"Validate existence status of all projects for specified groups\"\"\"\n projects_exist = True\n for group in self.groups:\n group.print_existence_message()\n if not group.projects_exist():\n projects_exist = False\n if not projects_exist:\n herd_output = colored('clowder herd', 'yellow')\n print('')\n print('First run ' + herd_output + ' to clone missing projects')\n print_exiting()\n","sub_path":"clowder/clowder_controller.py","file_name":"clowder_controller.py","file_ext":"py","file_size_in_byte":8085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"568412217","text":"from db import init_db\nfrom db import db_session\nfrom models import TbTest\n\ndef show_tables():\n queries = db_session.query(TbTest)\n entries = [dict(id=q.id, name=q.name, string=q.email) for q in queries]\n print(entries)\n \ndef add_entry(name, email):\n t = TbTest(name, email)\n db_session.add(t)\n db_session.commit()\n\ndef query_entry(name):\n for e in db_session.query(TbTest.name).filter_by(name = name):\n print(e)\ndef query_entry_all():\n u = db_session.query(TbTest.email).all()\n print(u)\n \ndef delete_entry(name, email):\n db_session.query(TbTest).filter(TbTest.name == name, TbTest.email==email).delete()\n db_session.commit()\n \ndef main():\n init_db() \n add_entry(\"김말똥\", \"test@naver.com\")\n add_entry(\"test2\", \"test2@naver.com\")\n query_entry(\"김말똥\")\n delete_entry(\"test\", \"test@naver.com\")\n query_entry_all()\n db_session.close()\n \nif __name__ == \"__main__\" :\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"248280147","text":"import urllib.request, urllib.parse, urllib.error\nimport xml.etree.ElementTree as ET\nimport ssl\n\n#api_key = False\n# If you have a Google Places API key, enter it here\n# api_key = 'AIzaSy___IDByT70'\n# https://developers.google.com/maps/documentation/geocoding/intro\n\n#if api_key is False:\n# api_key = 42\n# serviceurl = 'http://py4e-data.dr-chuck.net/xml?'\n#else :\n# serviceurl = 'https://maps.googleapis.com/maps/api/geocode/xml?'\n\n# Ignore SSL certificate errors\n#ctx = 
ssl.create_default_context()\n#ctx.check_hostname = False\n#ctx.verify_mode = ssl.CERT_NONE\n\nurl = 'http://py4e-data.dr-chuck.net/comments_40194.xml'\n#uh = urllib.request.urlopen(url, context=ctx)\nuh = urllib.request.urlopen(url)\ndata = uh.read()\n# print(data.decode())\ntotcnt = 0\ntree = ET.fromstring(data)\ncounts = tree.findall('.//count')\nfor cnt in counts:\n# print('count:', cnt.text)\n totcnt = totcnt + int(cnt.text)\nprint('totcnt: ',totcnt)\n","sub_path":"pythonCourses/pyex13/lab13xml.py","file_name":"lab13xml.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"607794990","text":"from typing import List\n\n\nclass leetcode32:\n\n def longestValidParentheses_sdl(self, s: str) -> int:\n # Pass rate: 91/231 test cases\n if not s:\n return 0\n count = 0\n while '()' in s or '[]' in s or '{}' in s:\n for index in ['()', '[]', '{}']:\n if index in s:\n count1 = 0\n idx = s.find(index)\n while idx!=-1:\n idx= s.find(index, idx+2, idx+4)\n count1 += 1\n count += 2*count1\n s = s.replace(index, '')\n return count\n def longestValidParentheses(self, s: str) -> int:\n maxans = 0\n dp = [0]*len(s)\n for i in range(1,len(s)):\n if s[i]==')':\n if s[i-1]=='(':\n dp[i] = dp[i-2]+2 if i>=2 else 2\n # These two forms are equivalent; Python's ternary is written with if/else\n # if i>=2:\n # dp[i] = dp[i-2]+2\n # else:\n # dp[i] = 2\n elif i-dp[i-1] > 0 and s[i-dp[i-1]-1]=='(':\n dp[i] = dp[i-1] + dp[i-dp[i-1]-2] + 2 if i-dp[i-1]>=2 else dp[i-1] + 2\n # if i-dp[i-1]>=2:\n # dp[i] = dp[i-1] + dp[i-dp[i-1]-2] + 2\n # else:\n # dp[i] = dp[i-1] + 2\n maxans = max(maxans, dp[i])\n return maxans\nif __name__ == '__main__':\n s = input().strip()\n print(\"s: \", s)\n leetcode32 = leetcode32()\n ret = leetcode32.longestValidParentheses(s)\n print(\"ret: \", ret)","sub_path":"python_space/python_workspace/leetcode/leetcode32.py","file_name":"leetcode32.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"295654551","text":"import nemo.collections\nimport json\n# NeMo's \"core\" package\nimport nemo\n# NeMo's ASR collection\nimport nemo.collections.asr as nemo_asr\n\nimport os, librosa,tqdm\n# Function to build a manifest\ndef build_manifest(transcripts_path, manifest_path, wav_path):\n data_dir = os.path.dirname(wav_path)\n print('looking for wavs in: {}'.format(data_dir))\n with open(transcripts_path, 'r') as fin:\n with open(manifest_path, 'w') as fout:\n for line in tqdm.tqdm_notebook(fin,total=len(os.listdir(data_dir))):\n # Lines look like this:\n # transcript (fileID)\n transcript = line.split(',')[1]\n transcript = transcript.lower().strip()\n\n file_name = line.split(',')[0].split('/')[1]\n audio_path = os.path.join(\n data_dir, file_name)\n\n duration = librosa.core.get_duration(filename=audio_path)\n\n # Write the metadata to the manifest\n metadata = {\n \"audio_filepath\": audio_path,\n \"duration\": duration,\n \"text\": transcript\n }\n json.dump(metadata, fout)\n fout.write('\\n')\n\nimport json\n\ntrain_manifest = '/data/verbit_train_manifest.json'\n\ntest_manifest = '/data/verbit_test_manifest_small.json'\n# build_manifest(test_transcripts, test_manifest, '/data/test/')\nprint(\"Test manifest created.\")\nprint(\"******\")\n\ndata_dir = '/data/verbit_azure_exp_q15x5/'\n\n# Create our NeuralModuleFactory, which will oversee the neural modules.\nneural_factory = nemo.core.NeuralModuleFactory(\n log_dir=data_dir + '/logdir',\n 
create_tb_writer=True)\n\nlogger = nemo.logging\n\nimport nemo\n# --- Config Information ---#\nfrom ruamel.yaml import YAML\n\nconfig_path = '/tmp/pycharm_project_573/examples/asr/configs/quarts15x5_verbit_pers.yaml'\n# config_path = '/data/verbit_azure_exp/quarts_verbit_pers.yaml'\n\nyaml = YAML(typ='safe')\n\nwith open(config_path, 'rt', encoding='utf8') as file:\n params = yaml.load(file)\n\nlabels = params['labels'] # Vocab\n\nprint ('labels are: {}'.format(labels))\n\n# --- Instantiate Neural Modules --- #\n\n# Create training and test data layers (which load data) and data preprocessor\ndata_layer_train = nemo_asr.AudioToTextDataLayer.import_from_config(\n config_path,\n \"AudioToTextDataLayer_train\",\n overwrite_params={\"manifest_filepath\": train_manifest}\n) # Training datalayer\n\ndata_layer_test = nemo_asr.AudioToTextDataLayer.import_from_config(\n config_path,\n \"AudioToTextDataLayer_eval\",\n overwrite_params={\"manifest_filepath\": test_manifest}\n) # Eval datalayer\n\ndata_preprocessor = nemo_asr.AudioToMelSpectrogramPreprocessor.import_from_config(\n config_path, \"AudioToMelSpectrogramPreprocessor\"\n)\n\n# Create the Jasper_4x1 encoder as specified, and a CTC decoder\nencoder = nemo_asr.JasperEncoder.import_from_config(\n config_path, \"JasperEncoder\"\n)\n\ndecoder = nemo_asr.JasperDecoderForCTC.import_from_config(\n config_path, \"JasperDecoderForCTC\",\n overwrite_params={\"num_classes\": len(labels)}\n)\n\nctc_loss = nemo_asr.CTCLossNM(num_classes=len(labels))\ngreedy_decoder = nemo_asr.GreedyCTCDecoder()\n\n# --- Assemble Training DAG --- #\naudio_signal, audio_signal_len, transcript, transcript_len = data_layer_train()\n\nprocessed_signal, processed_signal_len = data_preprocessor(\n input_signal=audio_signal,\n length=audio_signal_len)\n\nencoded, encoded_len = encoder(\n audio_signal=processed_signal,\n length=processed_signal_len)\n\nlog_probs = decoder(encoder_output=encoded)\npreds = greedy_decoder(log_probs=log_probs) # Training predictions\nloss = ctc_loss(\n log_probs=log_probs,\n targets=transcript,\n input_length=encoded_len,\n target_length=transcript_len)\n\n# --- Assemble Validation DAG --- #\n(audio_signal_test, audio_len_test,\n transcript_test, transcript_len_test) = data_layer_test()\n\nprocessed_signal_test, processed_len_test = data_preprocessor(\n input_signal=audio_signal_test,\n length=audio_len_test)\n\nencoded_test, encoded_len_test = encoder(\n audio_signal=processed_signal_test,\n length=processed_len_test)\n\nlog_probs_test = decoder(encoder_output=encoded_test)\npreds_test = greedy_decoder(log_probs=log_probs_test) # Test predictions\nloss_test = ctc_loss(\n log_probs=log_probs_test,\n targets=transcript_test,\n input_length=encoded_len_test,\n target_length=transcript_len_test)\n\n\n #--- Create Callbacks --- #\n\n# We use these imports to pass to callbacks more complex functions to perform.\nfrom nemo.collections.asr.helpers import monitor_asr_train_progress, \\\n process_evaluation_batch, process_evaluation_epoch\nfrom functools import partial\n\ntrain_callback = nemo.core.SimpleLossLoggerCallback(\n # Notice that we pass in loss, predictions, and the transcript info.\n # Of course we would like to see our training loss, but we need the\n # other arguments to calculate the WER.\n tensors=[loss, preds, transcript, transcript_len],\n # The print_func defines what gets printed.\n print_func=partial(\n partial(monitor_asr_train_progress,tb_logger=neural_factory.tb_writer ),\n labels=labels),step_freq=100,\n 
tb_writer=neural_factory.tb_writer\n )\n\n# We can create as many evaluation DAGs and callbacks as we want,\n# which is useful in the case of having more than one evaluation dataset.\n# In this case, we only have one.\neval_callback = nemo.core.EvaluatorCallback(\n eval_tensors=[loss_test, preds_test, transcript_test, transcript_len_test],\n user_iter_callback=partial(\n process_evaluation_batch, labels=labels),\n user_epochs_done_callback=process_evaluation_epoch,\n eval_step=500, # How often we evaluate the model on the test set\n tb_writer=neural_factory.tb_writer\n )\n\ncheckpoint_saver_callback = nemo.core.CheckpointCallback(\n folder=data_dir+'/checkpoints',\n step_freq=500 # How often checkpoints are saved\n )\n\nif not os.path.exists(data_dir+'/checkpoints'):\n os.makedirs(data_dir+'/checkpoints')\n\n\n# --- Start Training! --- #\nneural_factory.train(\n tensors_to_optimize=[loss],\n callbacks=[train_callback, eval_callback, checkpoint_saver_callback],\n optimizer='novograd',batches_per_step=1,\n optimization_params={\n \"num_epochs\": 100000, \"lr\": 0.01, \"weight_decay\": 1e-4\n })\n\n# Training for 100 epochs will take a little while, depending on your machine.\n# It should take about 20 minutes on Google Colab.\n# At the end of 100 epochs, your evaluation WER should be around 20-25%.","sub_path":"examples/asr/notebooks/verbit_test_using_lm.py","file_name":"verbit_test_using_lm.py","file_ext":"py","file_size_in_byte":6546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"510528831","text":"import config_keys\r\nimport copy\r\nimport inspect\r\nimport logging\r\nimport numpy as np\r\nimport operator\r\n\r\nfrom collections import defaultdict\r\nfrom functools import reduce\r\n\r\nfrom model.NetWrapper import NetWrapper, DirectedNetWrapper\r\nfrom model.algorithms import (shortestPath, \r\n connectedComponents,\r\n inducedSubgraph,\r\n getInducedSubgraph,\r\n pathLength)\r\n\r\nnw_pcst_logger = logging.getLogger(__name__)\r\n\r\n_infinity = float(\"inf\")\r\n\r\ndef nodeWeightedPrizeCollectingSteinerTreeLMP(graph, root=None):\r\n \"\"\"Compute an O(\\log n) Lagrangian multiplier preserving approximation of a \r\n node-weighted prize-collecting Steiner tree (LMP NW-PCST).\r\n\r\n Simply modify the underlying penalties of the graph as in Theorem 4.1 of the \r\n paper.\r\n\r\n References:\r\n Jochen K\\\"onemann, Sina Sadeghian, Laura Sanit\\`a, ``An LMP \r\n O(\\log n)-Approximation Algorithm for Node Weighted Prize Collecting \r\n Steiner Tree.'' FOCS, 2013.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph. Will be copied, as further changes may apply.\r\n\r\n root : int, optional\r\n The index of the root node. 
If None, compute the global solution \r\n to the NW-PCST problem.\r\n\r\n Returns:\r\n Tr : frozenset of ints\r\n An unweighted Tree approximating the LMP NW-PCST solution.\r\n \"\"\"\r\n graph = graph.copy()\r\n cheapVertices = {nodeID for nodeID in graph.getNodeList() \r\n if graph.getNodeAttribute(nodeID,\r\n config_keys.NODE_KEY_COST)\r\n <= graph.getNodeAttribute(nodeID,\r\n config_keys.NODE_KEY_PRIZE)}\r\n _lmpPenalties(graph, cheapVertices)\r\n return nodeWeightedPrizeCollectingSteinerTree(graph, root=root)\r\n\r\n\r\n\r\ndef nodeWeightedPrizeCollectingSteinerTree(graph, root=None):\r\n \"\"\"Compute an O(\\log n) approximation of a node-weighted prize-collecting \r\n Steiner tree (NW-PCST).\r\n\r\n Given a graph = (V,E) with nonnegative costs and prizes on all of its \r\n vertices, approximates the tree T containing the given root vertex that \r\n minimizes cost(T) + prize(V\\T). If root is not specified, computes the \r\n global such T.\r\n\r\n Could be made more memory efficient by not copying the graph.\r\n\r\n References:\r\n Jochen K\\\"onemann, Sina Sadeghian, Laura Sanit\\`a, ``An LMP \r\n O(\\log n)-Approximation Algorithm for Node Weighted Prize Collecting \r\n Steiner Tree.'' FOCS, 2013.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph. Will be copied, as further changes may apply.\r\n\r\n root : int, optional\r\n The index of the root node. If None, compute the global solution \r\n to the NW-PCST problem.\r\n\r\n Returns:\r\n Tr : frozenset of ints\r\n An unweighted Tree approximating the NW-PCST solution.\r\n \"\"\"\r\n if root is None:\r\n V = frozenset(graph.getNodeList())\r\n Tr = frozenset()\r\n opt = _infinity\r\n nw_pcst_logger.info((\"Solving global NW-PCST\\n\"\r\n \"#-----------------------------------\"))\r\n for nodeID in V:\r\n T = nodeWeightedPrizeCollectingSteinerTree(graph, root=nodeID)\r\n candidateOpt = _cost(graph, T) + _penalty(graph, V.difference(T))\r\n if candidateOpt < opt:\r\n nw_pcst_logger.info(\"Found better tree rooted at {}\"\r\n .format(nodeID))\r\n nw_pcst_logger.debug(\"T = {}\".format(T))\r\n opt = candidateOpt\r\n Tr = T\r\n return Tr\r\n if root not in graph.getNodeList():\r\n raise ValueError(\"Root nodeID {} is not a member of input graph\"\r\n .format(root))\r\n nw_pcst_logger.info((\"Solving NW-PCST rooted at {}\\n\"\r\n \"#-----------------------------------\").format(root))\r\n\r\n # Copy input graph.\r\n graph = graph.copy()\r\n # Collect cheap vertices\r\n cheapVertices = {nodeID for nodeID in graph.getNodeList() \r\n if graph.getNodeAttribute(nodeID, \r\n config_keys.NODE_KEY_COST) \r\n <= graph.getNodeAttribute(nodeID, \r\n config_keys.NODE_KEY_PRIZE)}\r\n\r\n # Reset all costs and penalties for the simplified dual formulation\r\n _reduceCostsAndPenalties(graph, cheapVertices)\r\n # Collect all vertices other than the root.\r\n V_prime = graph.getNodeList()\r\n V_prime.remove(root)\r\n # Collect vertices whose cost are at most their prize.\r\n # Add the root.\r\n inducerSet = cheapVertices.union({root})\r\n \"\"\"\r\n Get the connected components of the subgraph induced by root and the cheap \r\n vertices. Stored as frozensets. 
\r\n \"\"\"\r\n initialComponenets = connectedComponents(inducedSubgraph(graph, inducerSet))\r\n # Start the tree with the component containing the root, and remove it from\r\n # C\r\n Tr = next(component for component in initialComponenets \r\n if root in component)\r\n initialComponenets = initialComponenets.difference(frozenset({Tr}))\r\n C = list()\r\n C.append(initialComponenets)\r\n # The phase number\r\n i = -1\r\n \"\"\"\r\n The core loop. Cycle through phases so long as C[i+1] is not empty.\r\n \"\"\"\r\n while C[i+1]:\r\n \"\"\"\r\n Incrememt phase \\#, implicitly Initialize 2^(n-1) dual variables. \r\n Initialize sets of active and inactive moats, and initialize the (empty)\r\n tree.\r\n \"\"\"\r\n i += 1\r\n # if i == 2:\r\n # return None\r\n nw_pcst_logger.info((\"BEGINNING PHASE {}\\n\" \r\n \"#-----------------------------------\").format(i))\r\n nw_pcst_logger.debug(\"\\tC = {}\".format(_printableSetOfSets(C[i])))\r\n y = defaultdict(lambda : 0.)\r\n A = C[i]\r\n I = frozenset()\r\n T = frozenset()\r\n tau = 0.\r\n \"\"\"\r\n Promote y_S elements until the active moats vanish and A is empty.\r\n \"\"\"\r\n while A:\r\n nw_pcst_logger.info(\"#--- Secondary Loop iter\")\r\n nw_pcst_logger.debug(\"\\tA = {}\\n\\tI = {}\"\r\n .format(_printableSetOfSets(A),\r\n _printableSetOfSets(I)))\r\n v_tilde, epsilon_1 = _getEpsilonOne(graph, y, A, I)\r\n epsilon_2 = _getEpsilonTwo(graph, y, A)\r\n epsilon = min(epsilon_1, epsilon_2)\r\n tau += epsilon\r\n for S in A:\r\n y[S] += epsilon\r\n \"\"\"\r\n Then do something with the ages???\r\n \"\"\"\r\n\r\n if epsilon == epsilon_2:\r\n \"\"\"\r\n A constraint of type (2) became tight. Move any tight moats from \r\n A to I (deactivate them).\r\n \"\"\"\r\n nw_pcst_logger.info(\"### epsilon_2 < epsilon_1\")\r\n for S in A:\r\n penalty = _penalty(graph, S)\r\n sumDuals = _sumDuals(y, S)\r\n if np.isclose(penalty, sumDuals):\r\n nw_pcst_logger.debug(\"\\tMoat {} is now inactive\"\r\n .format(set(S)))\r\n A = A.difference(frozenset({S}))\r\n I = I.union(frozenset({S}))\r\n else:\r\n \"\"\"\r\n A constraint of type (1) became tight for v_tilde. Get the set \r\n of inclusion-wise maximal moats incident on v_tilde. Find the \r\n sum of ages of all component cores that load v_tilde. 
If it \r\n falls under the threshold and v_tilde is not incident upon Tr, \r\n then remove all of these incident moats and add a supermoat \r\n including v_tilde and all of the removed moats.\r\n \"\"\"\r\n N = _inclusionWiseMaximalIncidentSubsets(graph, \r\n A.union(I), \r\n v_tilde)\r\n nw_pcst_logger.info(\"### epsilon_1 < epsilon_2\")\r\n nw_pcst_logger.debug(\"\\tN = {}\".format(_printableSetOfSets(N)))\r\n coreSum = _getLoadingCoreAgeSum(graph, y, C[i], v_tilde, tau)\r\n if (coreSum < 1.5 * tau and \r\n v_tilde not in graph.getSubgraphNeighbors(Tr)):\r\n nw_pcst_logger.info(\"#### Compressing moats.\")\r\n for S in N:\r\n nw_pcst_logger.debug(\"\\tCombining set {}\".format(set(S)))\r\n A = A.difference(frozenset({S}))\r\n I = I.difference(frozenset({S}))\r\n newMoat = reduce(lambda a, b : a.union(b), N)\r\n newMoat = newMoat.union({v_tilde})\r\n nw_pcst_logger.debug(\"\\tAdding new active Moat {}\"\r\n .format(set(newMoat)))\r\n A = A.union(frozenset({newMoat}))\r\n else:\r\n nw_pcst_logger.info(\"#### Extending phase tree.\")\r\n nw_pcst_logger.debug(\"Extending phase tree {} with tip {}\"\r\n .format(set(T), v_tilde))\r\n T = T.union(frozenset({v_tilde}))\r\n for S in N:\r\n nw_pcst_logger.debug(\"## Running FST for {}\"\r\n .format(set(S)))\r\n nw_pcst_logger.debug(\"## Phase tree : {}\"\r\n .format(set(T)))\r\n T = FST(graph, y, C[i], T, S, frozenset({v_tilde}), tau)\r\n nw_pcst_logger.debug(\"------##----- Finished FST for {}\"\r\n .format(set(S)))\r\n nw_pcst_logger.debug(\"------##----- Have phase tree : {}\"\r\n .format(set(T)))\r\n if len(connectedComponents(getInducedSubgraph(graph, T))) > 1:\r\n nw_pcst_logger.error(\"T is disconnected!\")\r\n break\r\n\r\n nw_pcst_logger.debug(\"Built Tree T = {}\".format(set(T)))\r\n if T:\r\n \"\"\"\r\n Preserve any initial components not subsumed by the phase tree T to \r\n the initial components for the next phase, C[i+1]. If T intersects \r\n the boundary of Tr, then add T to Tr. Otherwise, add T as a new \r\n initial component to C[i+1].\r\n \"\"\"\r\n nextC = frozenset({S for S in C[i] if not S.issubset(T)})\r\n gammaTr = graph.getSubgraphNeighbors(Tr)\r\n treeIntersect = T.intersection(gammaTr)\r\n if treeIntersect:\r\n Tr = Tr.union(T)\r\n else:\r\n nextC = nextC.union(frozenset({T}))\r\n C.append(nextC)\r\n else:\r\n \"\"\"\r\n We have ended a phase with an empty phase tree. The built Steiner \r\n tree must be (approximately) optimized.\r\n \"\"\"\r\n return Tr\r\n nw_pcst_logger.debug(\"Current Tree Tr = {}\".format(set(Tr)))\r\n nw_pcst_logger.debug(\"Current Initial Componenets C = {}\"\r\n .format(_printableSetOfSets(C[i+1])))\r\n\r\n \"\"\"\r\n We have finished a phase with an empty set of initial components. 
Ergo, we\r\n are finished and should output our (approximately) optimized Steiner tree.\r\n \"\"\"\r\n return Tr\r\n\r\ndef _reduceCostsAndPenalties(graph, cheapVertices):\r\n \"\"\" For all vertices, set cost(v)=0 and penalty(v)=penalty(v)-cost(v) if \r\n it is cheap, and set cost(v)=cost(v)-penalty(v) and penalty(v)=0 \r\n otherwise.\r\n\r\n NOTE: Currently implicitly conflating NODE_KEY_PRIZE with \r\n NODE_KEY_PENALTY (which does not exist).\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The graph to be modified.\r\n\r\n cheapVertices : collection of ints\r\n Collection of indices computed to be cheap.\r\n\r\n Returns:\r\n None\r\n \"\"\"\r\n for nodeID in graph.getNodeList():\r\n if nodeID in cheapVertices:\r\n cost = graph.getNodeAttribute(nodeID, \r\n config_keys.NODE_KEY_COST)\r\n graph.setNodeAttributeRelative(nodeID,\r\n config_keys.NODE_KEY_PRIZE,\r\n operator.sub,\r\n cost)\r\n graph.setNodeAttribute(nodeID, config_keys.NODE_KEY_COST, 0.)\r\n else:\r\n penalty = graph.getNodeAttribute(nodeID,\r\n config_keys.NODE_KEY_PRIZE)\r\n graph.setNodeAttributeRelative(nodeID,\r\n config_keys.NODE_KEY_COST,\r\n operator.sub,\r\n penalty)\r\n graph.setNodeAttribute(nodeID, config_keys.NODE_KEY_PRIZE, 0.)\r\n\r\ndef _lmpPenalties(graph, cheapVertices):\r\n \"\"\" Set the penalties of all cheap vertices v to 2*penalty(v) - cost(v).\r\n\r\n NOTE: Currently implicitly conflating NODE_KEY_PRIZE with \r\n NODE_KEY_PENALTY (which does not exist).\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The graph to be modified.\r\n\r\n cheapVertices : collection of ints\r\n Collection of indices computed to be cheap.\r\n\r\n Returns:\r\n None\r\n \"\"\"\r\n for nodeID in cheapVertices:\r\n cost = graph.getNodeAttribute(nodeID, config_keys.NODE_KEY_COST)\r\n penalty = graph.getNodeAttribute(nodeID, config_keys.NODE_KEY_PRIZE)\r\n difference = penalty - cost\r\n graph.setNodeAttributeRelative(nodeID, \r\n config_keys.NODE_KEY_PRIZE,\r\n operator.add,\r\n difference)\r\n\r\ndef _getLoadingCoreAgeSum(graph, y, C, v, tau):\r\n \"\"\"Get the sum of ages of all cores of support sets that load v.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n y : defaultdict (frozenset : float)\r\n The mapping of subsets to nonnegative dual values. 
\r\n\r\n C : set of frozensets of ints\r\n The initial components in this phase.\r\n\r\n v : int\r\n A nodeID for some vertex.\r\n\r\n tau : float\r\n The current time.\r\n\r\n Returns:\r\n sumLoadingCore : float\r\n The sum of ages of the cores of support sets that load v.\r\n \"\"\"\r\n allCores = {_core(graph, C, S, tau) for S in y.keys() \r\n if v in graph.getSubgraphNeighbors(S)}\r\n return sum(a[1] for a in allCores)\r\n\r\n\r\ndef _inclusionWiseMaximalIncidentSubsets(graph, B, v):\r\n \"\"\"Return all inclusion-wise maximal elements of B that are incident on v in\r\n G.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n B : set of frozensets of ints\r\n A set of sets of nodeIDs\r\n\r\n v : int\r\n A nodeID for some vertex.\r\n\r\n Returns:\r\n N : set of frozensets of ints\r\n Inclusion-wise maximal elements of B that are incident on v.\r\n \"\"\"\r\n N = {S for S in B if v in graph.getSubgraphNeighbors(S) \r\n and not np.any([S.issubset(R) for R in B.difference({S})])}\r\n return N\r\n\r\ndef _inclusionWiseMaximalSubsets(B, S):\r\n \"\"\"Return all inclusion-wise maximal elements of B that are subsets of S.\r\n\r\n Args:\r\n B : set of frozensets of ints\r\n A set of sets of nodeIDs\r\n\r\n S : frozenset of ints\r\n A set of nodeIDs\r\n\r\n Returns:\r\n N : set of frozensets of ints\r\n Inclusion-wise maximal elements of B that participate in S.\r\n \"\"\"\r\n return {R for R in B.difference(frozenset({S}))\r\n if R.issubset(S) and\r\n not np.any([R.issubset(Q) for Q in B.difference({R})])}\r\n\r\ndef _age(graph, C, tau):\r\n \"\"\"Compute the age of an initial component, C, as in the top of page 7. \r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n C : frozenset of ints\r\n A set of nodeIDs. An initial component in this phase.\r\n\r\n tau : float\r\n The current time.\r\n\r\n Returns:\r\n age : float\r\n The age of the subset. \r\n \"\"\"\r\n penalty = _penalty(graph, C)\r\n return min(penalty, tau)\r\n\r\ndef _core(graph, C, S, tau):\r\n \"\"\"Compute the core of subset S, or the initial component within S of \r\n greatest age.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n C : set of frozensets of ints\r\n The initial components in this phase.\r\n\r\n S : frozenset of ints\r\n A set of nodeIDs. Should be a superset of some member(s) of C.\r\n\r\n tau : float\r\n The current time.\r\n\r\n Returns:\r\n maxCandidate : tuple (frozenset of ints, float)\r\n The core, paired with its age.\r\n \"\"\"\r\n coreCandidates = ((c, _age(graph, c, tau)) for c in C if c.issubset(S))\r\n maxCandidate = max(coreCandidates, key=operator.itemgetter(1))\r\n return maxCandidate\r\n\r\ndef _sumDuals(y, S):\r\n \"\"\"Compute the sum of duals for all subsets of S.\r\n\r\n Args:\r\n y : defaultdict (frozenset : float)\r\n The mapping of subsets to nonnegative dual values. \r\n\r\n S : frozenset of ints\r\n A set of nodeIDs.\r\n\r\n Returns:\r\n sumDuals : float\r\n The sum of dual values over all subsets of S. \r\n \"\"\"\r\n sumDuals = sum(y[R] for R in y.keys() if R.issubset(S))\r\n return sumDuals\r\n\r\n\r\ndef _penalty(graph, S):\r\n \"\"\"Compute the total penalty for S.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n S : frozenset of ints\r\n A set of nodeIDs.\r\n\r\n Returns:\r\n penalty : float\r\n The aggregate penalty of the subset. 
\r\n \"\"\"\r\n return sum(graph.getNodeAttribute(v, config_keys.NODE_KEY_PRIZE) for v in S)\r\n\r\ndef _cost(graph, S):\r\n \"\"\"Compute the total cost for S.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n S : frozenset of ints\r\n A set of nodeIDs.\r\n\r\n Returns:\r\n cost : float\r\n The aggregate cost of the subset. \r\n \"\"\"\r\n return sum(graph.getNodeAttribute(v, config_keys.NODE_KEY_COST) for v in S)\r\n\r\ndef _getEpsilonOne(graph, y, A, I):\r\n \"\"\"Compute line 9 of Algorithm1 from the paper, computing $\\epsilon_1$:\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n y : defaultdict (frozenset : float)\r\n The mapping of subsets to nonnegative dual values. \r\n\r\n A : set of frozensets of ints\r\n The set of active moats.\r\n\r\n I : set of frozensets of ints\r\n The set of inactive moats.\r\n\r\n Returns:\r\n e_1 : float\r\n The minimum value of the expression.\r\n\r\n v_tilde : int\r\n The nodeID of the minimizing vertex. \r\n \"\"\"\r\n allMoatElements = reduce(lambda a, b : a.union(b), A.union(I))\r\n allCandidates = set(graph.getNodeList()).difference(allMoatElements)\r\n allPairs = ((v, _getEpsilonOneCanditate(graph, y, A, v)) \r\n for v in allCandidates)\r\n minPair = min(allPairs, key=operator.itemgetter(1))\r\n return minPair\r\n\r\ndef _getEpsilonOneCanditate(graph, y, A, v):\r\n \"\"\"Return a single value in the minimized expression in line 9 for v.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n y : dict (frozenset : float)\r\n The mapping of subsets to nonnegative dual values. \r\n\r\n A : set of frozensets of ints\r\n The set of active moats.\r\n\r\n v : int\r\n The nodeID of a vertex.\r\n\r\n Returns:\r\n val : float\r\n The value of the expression.\r\n \"\"\"\r\n numActiveBoundarySets = _numBoundarySets(graph, A, v)\r\n if numActiveBoundarySets == 0:\r\n \"\"\"\r\n v is not incident upon any active moats. Ergo, it cannot become tight. \r\n Return infinity.\r\n \"\"\"\r\n return _infinity\r\n cost = graph.getNodeAttribute(v, config_keys.NODE_KEY_COST)\r\n sumBoundaryYs = _sumBoundaryYS(graph, y, v)\r\n val = (cost - sumBoundaryYs) / numActiveBoundarySets\r\n return val\r\n\r\ndef _sumBoundaryYS(graph, y, v): \r\n \"\"\"Return the sum y_S over S \\subseteq V such that v \\in \\Gamma(S)\r\n\r\n Sum the nonzero dual variables for subsets S where v is on the boundary of S \r\n (i.e. v in incident on S). We are able to sum over only set dual variables,\r\n which are those who have keys explicitly placed in y.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n y : dict (frozenset : float)\r\n The mapping of subsets to nonnegative dual values. 
\r\n\r\n v : int\r\n The nodeID of a vertex.\r\n\r\n Returns:\r\n sumB : float\r\n The sum of the expression.\r\n \"\"\"\r\n return sum(y[S] for S in y.keys() if v in graph.getSubgraphNeighbors(S))\r\n\r\ndef _numBoundarySets(graph, A, v): \r\n \"\"\"Return the number of elements S of A such that v \\in \\Gamma(S).\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n A : set of frozensets of ints\r\n The set of active moats.\r\n\r\n v : int\r\n The nodeID of a vertex.\r\n\r\n Returns:\r\n sumB : int\r\n Number of such sets.\r\n \"\"\"\r\n return sum(1 for S in A if v in graph.getSubgraphNeighbors(S))\r\n\r\ndef _getEpsilonTwo(graph, y, A):\r\n \"\"\"Compute line 11 of Algorithm 1 from the paper, computing $\\epsilon_2$:\r\n\r\n Computes the total penalty for each active moat, subtracting the sum of dual \r\n variables indexing a subset of the moat.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n y : dict (frozenset : float)\r\n The mapping of subsets to nonnegative dual values. \r\n\r\n A : set of frozensets of ints\r\n The set of active moats.\r\n\r\n Returns:\r\n e_2 : float\r\n The minimum value of the expression.\r\n \"\"\"\r\n allPairs = (_getEpsilonTwoCandidate(graph, y, S) for S in A)\r\n return min(allPairs)\r\n\r\n\r\ndef _getEpsilonTwoCandidate(graph, y, S):\r\n \"\"\"Compute line 11 of Algorithm 1 from the paper, computing $\\epsilon_2$:\r\n\r\n Computes the total penalty (prize) for the given active moat, subtracting \r\n the sum of dual variables indexing a subset of the moat.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n y : dict (frozenset : float)\r\n The mapping of subsets to nonnegative dual values. \r\n\r\n S : frozenset of ints\r\n An active moat\r\n\r\n Returns:\r\n e_2 : float\r\n The value of the expression.\r\n \"\"\"\r\n totalPrize = _penalty(graph, S)\r\n sumDuals = _sumDuals(y, S)\r\n return totalPrize - sumDuals\r\n\r\ndef FST(graph, y, C, T, S, L, tau_bar):\r\n \"\"\"Performs the FST function, Algorithm 2 from the paper.\r\n\r\n Construct an auxiliary graph H_S. Recursively build the tree T connecting\r\n v_tilde to every loading core. \r\n\r\n References:\r\n Jochen K\\\"onemann, Sina Sadeghian, Laura Sanit\\`a, ``An LMP \r\n O(\\log n)-Approximation Algorithm for Node Weighted Prize Collecting \r\n Steiner Tree.'' FOCS, 2013.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n y : defaultdict (frozenset : float)\r\n The mapping of subsets to nonnegative dual values. 
\r\n\r\n T : set of ints\r\n The phase tree that has been so far constructed.\r\n\r\n S : frozenset of ints\r\n An inclusion-wise maximal set incident upon v_tilde.\r\n\r\n L : frozenset of ints\r\n The set of nodeIDs already covered by the phase tree, T.\r\n \r\n tau_bar : float\r\n The time when the current phase ended.\r\n\r\n Returns:\r\n None\r\n \"\"\"\r\n H_S = _getAuxiliaryGraph(graph, y, C, S, L, tau_bar)\r\n nw_pcst_logger.info(_printCalls())\r\n nw_pcst_logger.debug(_printState(H_S, T, S, L=L, tau_bar=tau_bar))\r\n if len(L) == 1:\r\n P = L.copy()\r\n else:\r\n (a,b) = L\r\n P = frozenset(shortestPath(H_S, a, b))\r\n nw_pcst_logger.debug(\"FST found shortest path {} from {} to {}\"\r\n .format(P, a, b))\r\n supervertices = {R for R in H_S.getNodeList() if isinstance(R, frozenset)}\r\n for R in supervertices:\r\n neighborhood = frozenset(H_S.getNodeNeighbors(R))\r\n intersection = neighborhood.intersection(P)\r\n if intersection:\r\n T = FST(graph, y, C, T, R, intersection, tau_bar)\r\n originalVertices = {nodeID for nodeID in P if isinstance(nodeID, int)}\r\n T = T.union(originalVertices)\r\n nw_pcst_logger.debug(\"FST adding {} to T\".format(set(originalVertices)))\r\n core_S, _ = _core(graph, C, S, tau_bar)\r\n z_0 = next(iter(core_S))\r\n T = CVtx(graph, y, C, T, S, tau_bar, z_0, 0)\r\n T = T.union(core_S)\r\n nw_pcst_logger.debug(\"FST adding the core {} to T\".format(set(core_S)))\r\n return T\r\n\r\ndef _compressSupervertex(graph, S):\r\n \"\"\"Compress all members of S into a supervertex in place.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph. It will be modified in place.\r\n\r\n S : frozenset of ints\r\n A set of connected vertices.\r\n\r\n Returns:\r\n H : model.NetWrapper.NetWrapper\r\n A compressed graph.\r\n \"\"\"\r\n boundary = graph.getSubgraphNeighbors(S)\r\n for nodeID in S:\r\n graph.removeNode(nodeID)\r\n graph.addNode(S)\r\n for neighbor in boundary:\r\n graph.addEdge(S, neighbor)\r\n\r\ndef _getAuxiliaryGraph(graph, y, C, S, L, tau_bar):\r\n \"\"\"Create auxiliary graph H_S for FST, \r\n\r\n Starts with a subgraph graph[A \\cup L], then compresses inclusion-wise \r\n maximal inactive sets into supervertices. Then converts H_S into a directed\r\n graph with edge weights equal to the auxiliary cost\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n y : defaultdict (frozenset : float)\r\n The mapping of subsets to nonnegative dual values. \r\n\r\n C : set of frozensets of ints\r\n The initial componenets in this phase.\r\n\r\n S : frozenset of ints\r\n An inclusion-wise maximal set incident upon v_tilde.\r\n\r\n L : frozenset of ints\r\n The set of nodeIDs already covered by the phase tree, T.\r\n \r\n tau_bar : float\r\n The time when the current phase ended.\r\n\r\n Returns:\r\n H_S : model.NetWrapper.NetWrapper\r\n The auxiliary graph, will all maximal inactive sets compressed into\r\n supervertices.\r\n \"\"\"\r\n H_S = getInducedSubgraph(graph, S.union(L))\r\n \"\"\"\r\n Currently collecting all support duals and filtering out those with inactive\r\n cores. Any strict subset of S should be inactive anyway, but the paper \r\n really hammers home this point so better safe than sorry. 
\r\n \"\"\"\r\n allSupport = frozenset(y.keys())\r\n inactiveSupport = frozenset({R for R in allSupport \r\n if _core(graph, C, R, tau_bar)[1] < tau_bar})\r\n N = _inclusionWiseMaximalSubsets(inactiveSupport, S)\r\n for R in N:\r\n _compressSupervertex(H_S, R)\r\n H_S = DirectedNetWrapper(copy = H_S, set_edges=False)\r\n core_S, _ = _core(graph, C, S, tau_bar)\r\n for nodeID in H_S.getNodeList():\r\n if nodeID in core_S:\r\n aux_cost = 0.\r\n elif isinstance(nodeID, frozenset):\r\n aux_cost = 0.\r\n elif frozenset({nodeID}) in N:\r\n aux_cost = 0.\r\n else:\r\n aux_cost = _getAuxiliaryCost(graph, y, C, S, core_S, \r\n nodeID, tau_bar)\r\n neighbors = H_S.getNodeNeighbors(nodeID)\r\n for neighbor in neighbors:\r\n H_S.setEdgeAttribute(neighbor, \r\n nodeID, \r\n config_keys.EDGE_KEY_WEIGHT, \r\n aux_cost)\r\n return H_S\r\n\r\n\r\ndef _getAuxiliaryCost(graph, y, C, S, core_S, nodeID, tau_bar):\r\n \"\"\"Compute the auxiliary cost c_S(v), the sum of y_R for all\r\n R \\subseteq S such that v \\in \\Gamma(R) and core(R) = core(S).\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n y : defaultdict (frozenset : float)\r\n The mapping of subsets to nonnegative dual values. \r\n\r\n C : set of frozensets of ints\r\n The initial components in this phase.\r\n\r\n S : frozenset of ints\r\n An inclusion-wise maximal set incident upon v_tilde.\r\n\r\n core_S : frozenset of ints\r\n The core of S.\r\n\r\n nodeID : int\r\n A particular nodeID.\r\n\r\n tau_bar : float\r\n The time when the current phase ended.\r\n\r\n Returns:\r\n cost : float\r\n The auxiliary cost c_S(v) for the given nodeID. \r\n \"\"\"\r\n load = (y[R] for R in y.keys() \r\n if R.issubset(S) and \r\n _core(graph, C, R, tau_bar)[0] == core_S and\r\n nodeID in graph.getSubgraphNeighbors(R))\r\n return sum(load)\r\n\r\ndef CVtx(graph, y, C, T, S_d, tau_bar, z, d):\r\n \"\"\"Performs the CVtx function, Algorithm 3 from the paper.\r\n\r\n Construct an auxiliary graph H_S. Recursively build the tree T connecting\r\n v_tilde to every loading core. \r\n\r\n References:\r\n Jochen K\\\"onemann, Sina Sadeghian, Laura Sanit\\`a, ``An LMP \r\n O(\\log n)-Approximation Algorithm for Node Weighted Prize Collecting \r\n Steiner Tree.'' FOCS, 2013.\r\n\r\n Args:\r\n graph : model.NetWrapper.NetWrapper\r\n The unweighted graph.\r\n\r\n y : defaultdict (frozenset : float)\r\n The mapping of subsets to nonnegative dual values. \r\n\r\n T : set of ints\r\n The phase tree that has been so far constructed.\r\n\r\n S_d : frozenset of ints\r\n The support set to connect at recursion depth d.\r\n \r\n tau_bar : float\r\n The time when the current phase ended.\r\n\r\n z : int\r\n Index of an arbitrary node in the core of S_d.\r\n\r\n d : int\r\n The index level. Starts at 0 and increments at every recursive call.\r\n\r\n Returns:\r\n T : set of ints\r\n The extended phase tree.\r\n \"\"\"\r\n H_S_d = _getAuxiliaryGraph(graph, y, C, S_d, \r\n T.union(frozenset({z})), tau_bar)\r\n nw_pcst_logger.info(_printCalls())\r\n nw_pcst_logger.debug(_printState(H_S_d, T, S_d, tau_bar=tau_bar, z=z, d=d))\r\n T_hat = T.copy()\r\n supervertices = {R for R in H_S_d.getNodeList() if isinstance(R, frozenset)}\r\n for R in supervertices:\r\n intersection_T = R.intersection(T)\r\n if intersection_T:\r\n T_hat = T_hat.difference(R)\r\n T_hat = T_hat.union(intersection_T)\r\n \"\"\"\r\n NOTE: this is very inefficient as written. 
Basically we want the \r\n shortest path in H_S_d connecting z to any member of T_hat. \r\n \"\"\"\r\n Q = list()\r\n Q_length = _infinity\r\n for nodeID in T_hat:\r\n candidateQ = shortestPath(H_S_d, z, nodeID)\r\n candidateLength = pathLength(H_S_d, candidateQ)\r\n if candidateLength < Q_length:\r\n Q = candidateQ\r\n Q_length = candidateLength\r\n nw_pcst_logger.debug(\"CVtx found path {}\".format(Q))\r\n originalVertices = {nodeID for nodeID in Q if isinstance(nodeID, int)}\r\n T = T.union(originalVertices)\r\n nw_pcst_logger.debug(\"CVtx Adding {} to T\".format(set(originalVertices)))\r\n for R in supervertices.difference(T_hat):\r\n boundaryR = frozenset(H_S_d.getNodeNeighbors(R))\r\n intersection_Q = boundaryR.intersection(frozenset(Q))\r\n if intersection_Q:\r\n nw_pcst_logger.debug(\"Calling FST on L={}\"\r\n .format(set(intersection_Q)))\r\n T = FST(graph, y, C, T, R, intersection_Q, tau_bar)\r\n q_l = Q[-1]\r\n q_l_1 = Q[-2]\r\n if q_l in supervertices:\r\n # recurse into the supervertex at the tip of the path\r\n T = CVtx(graph, y, C, T, q_l, tau_bar, q_l_1, d+1)\r\n return T\r\n\r\ndef _printCalls():\r\n curframe = inspect.currentframe()\r\n calframe = inspect.getouterframes(curframe, 2)\r\n return \"Running {} called by {}\".format(calframe[1][3], calframe[2][3])\r\n\r\ndef _printState(H, T, S, L=None, tau_bar=None, z=None, d=None):\r\n state = list()\r\n state.append(\"State Readout:\")\r\n state.append(\"\\tH nodes : {}\".format(H.getNodeList()))\r\n state.append(\"\\tH edges : {}\".format(H._G.edges()))\r\n state.append(\"\\tT : {}\".format(set(T)))\r\n state.append(\"\\tS : {}\".format(set(S)))\r\n if L is not None:\r\n state.append(\"\\tL : {}\".format(set(L)))\r\n if tau_bar is not None:\r\n state.append(\"\\ttau_bar : {}\".format(tau_bar))\r\n if z is not None:\r\n state.append(\"\\tz : {}\".format(z))\r\n if d is not None:\r\n state.append(\"\\td : {}\".format(d))\r\n return \"\\n\".join(state)\r\n\r\ndef _printableSetOfSets(B):\r\n return list(map(set, B))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"model/nw_pcst.py","file_name":"nw_pcst.py","file_ext":"py","file_size_in_byte":33745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"649571508","text":"import constant as c\nfrom bs4 import BeautifulSoup\nimport sys\nimport os.path\n\n\ndef checking(session):\n print(\"started checking...\\n\")\n\n problem_url = \"https://atcoder.jp/contests/{}/tasks/{}\".format(c.contest_name, c.task_name)\n\n r = session.get(problem_url)\n soup = BeautifulSoup(r.text, 'lxml').find_all(class_=\"lang-ja\")\n\n if not soup:\n print(\" is missing\")\n sys.exit()\n\n section = BeautifulSoup(str(soup[0]), 'lxml').find_all(\"section\")\n\n prob_in = [BeautifulSoup(str(s), 'lxml').pre.string for s in [s for s in section if '入力例' in str(s)]]\n prob_out = [BeautifulSoup(str(s), 'lxml').pre.string for s in [s for s in section if '出力例' in str(s)]]\n\n if len(prob_in) != len(prob_out):\n print(\"Different number of sample cases\")\n sys.exit()\n\n if not prob_in:\n print(\"No sample cases found\")\n sys.exit()\n\n os.system(\"g++ -std=gnu++1y -O2 -o ./a.out \" + c.source_code_path)\n\n for i in range(len(prob_in)):\n with open(\"./in.txt\", mode='w') as f:\n f.write(prob_in[i])\n\n os.system(\"./a.out < in.txt > out.txt\")\n\n with open(\"./out.txt\", mode='r') as f:\n your_out = f.read()\n os.remove(\"./in.txt\")\n os.remove(\"./out.txt\")\n\n if your_out.split() != prob_out[i].split():\n print(\"WA\")\n print(\"-input-\")\n print(prob_in[i])\n print(\"-your output-\")\n 
print(your_out)\n print(\"-true output-\")\n print(prob_out[i])\n os.remove(\"./a.out\")\n sys.exit()\n\n os.remove(\"./a.out\")\n\n print(\"AC - yes, really!\")\n print(\"\\nfinished checking\\n\")\n","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"27711873","text":"from pathlib import Path\nfrom unittest import TestCase\n\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\n\nfrom make_prg.from_msa.prg_builder import PrgBuilder\nfrom make_prg.from_msa.interval_partition import IntervalType, Interval\nfrom tests.from_msa import make_alignment, MSA\n\nthis_dir = Path(__file__).resolve().parent\ndata_dir = this_dir.parent / \"data\" / \"make_prg_from_msa\"\n\n\nclass TestConsensusString(TestCase):\n def test_all_match(self):\n alignment = make_alignment([\"AATTA\", \"AATTA\"])\n result = PrgBuilder.get_consensus(alignment)\n self.assertEqual(result, \"AATTA\")\n\n def test_mixed_match_nonmatch(self):\n alignment = make_alignment([\"AAGTA\", \"CATTA\"])\n result = PrgBuilder.get_consensus(alignment)\n self.assertEqual(result, \"*A*TA\")\n\n def test_indel_nonmatch(self):\n alignment = make_alignment([\"AAAA\", \"A--A\"])\n result = PrgBuilder.get_consensus(alignment)\n self.assertEqual(result, \"A**A\")\n\n def test_IUPACAmbiguous_nonmatch(self):\n alignment = make_alignment([\"RYA\", \"RTA\"])\n result = PrgBuilder.get_consensus(alignment)\n self.assertEqual(result, \"**A\")\n\n def test_N_special_treatment(self):\n \"\"\"\n i)A and N at pos 2 are different, but still consensus\n ii)N and N at pos 0 are same, but not consensus\"\"\"\n alignment = make_alignment([\"NTN\", \"NTA\"])\n result = PrgBuilder.get_consensus(alignment)\n self.assertEqual(result, \"*TA\")\n\n def test_all_gap_nonmatch(self):\n alignment = make_alignment([\"A--A\", \"A--A\"])\n result = PrgBuilder.get_consensus(alignment)\n self.assertEqual(result, \"A**A\")\n\n\ndef msas_equal(al1: MSA, al2: MSA):\n if len(al1) != len(al2):\n return False\n for i in range(len(al1)):\n if al1[i].seq != al2[i].seq:\n return False\n if al1[i].id != al2[i].id:\n return False\n return True\n\n\nclass TestSubAlignments(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.alignment = make_alignment(\n [\"AAAT\", \"C--C\", \"AATT\", \"GNGG\"], [\"s1\", \"s2\", \"s3\", \"s4\"]\n )\n\n def test_GivenOrderedIds_SubalignmentInSequenceOrder(self):\n result = PrgBuilder.get_sub_alignment_by_list_id([\"s1\", \"s3\"], self.alignment)\n expected = MSA([self.alignment[0], self.alignment[2]])\n self.assertTrue(msas_equal(expected, result))\n\n def test_GivenUnorderedIds_SubalignmentStillInSequenceOrder(self):\n \"\"\"\n Sequences given rearranged are still output in input order\n \"\"\"\n result = PrgBuilder.get_sub_alignment_by_list_id([\"s3\", \"s1\"], self.alignment)\n expected = MSA([self.alignment[0], self.alignment[2]])\n self.assertTrue(msas_equal(expected, result))\n\n def test_get_subalignment_with_interval(self):\n result = PrgBuilder.get_sub_alignment_by_list_id(\n [\"s2\", \"s3\"], self.alignment, [0, 2]\n )\n expected = make_alignment([\"C--\", \"AAT\"], [\"s2\", \"s3\"])\n self.assertTrue(msas_equal(expected, result))\n\n\nclass TestSkipClustering(TestCase):\n \"\"\"\n Test the conditions under which clustering is not performed.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Set of parameters whereby clustering is to be performed.\n We'll modify each of them in turn\n \"\"\"\n self.aligned_seqs = 
[\"ATTTTTTA\", \"A--TTTTA\", \"ATTTCTTA\"]\n self.tested_params = {\n \"interval\": Interval(IntervalType.Match, 0, 7),\n \"max_nesting\": 2,\n \"nesting_level\": 1,\n \"min_match_length\": 2,\n \"alignment\": make_alignment(self.aligned_seqs),\n }\n\n def test_original_params_no_skip_clustering(self):\n self.assertFalse(PrgBuilder.skip_clustering(**self.tested_params))\n\n def test_max_nesting_reached_skip_clustering(self):\n self.tested_params[\"nesting_level\"] = 2\n self.assertTrue(PrgBuilder.skip_clustering(**self.tested_params))\n\n def test_small_interval_skip_clustering(self):\n self.tested_params[\"interval\"].stop = 1\n self.assertTrue(PrgBuilder.skip_clustering(**self.tested_params))\n\n def test_too_few_seqs_skip_clustering(self):\n self.tested_params[\"alignment\"] = self.tested_params[\"alignment\"][0:1]\n self.assertTrue(PrgBuilder.skip_clustering(**self.tested_params))\n\n def test_ambiguous_alignment_skip_clustering(self):\n \"\"\"\n `added_seq` below is an equally valid alignment as \"A--TTTTA\" to the sequence\n \"ATTAATTA\"\n If we have such ambiguous alignments (defined as more than one gapped alignment\n corresponding to the same ungapped sequence), we choose not to cluster the\n alignment, as it can create ambiguous graphs (whereby different paths spell same sequence)\n \"\"\"\n added_seq = \"ATTTT--A\"\n self.tested_params[\"alignment\"] = make_alignment(\n self.aligned_seqs + [added_seq]\n )\n self.assertTrue(PrgBuilder.skip_clustering(**self.tested_params))\n\n\nclass Test_Integration_FullBuilds(TestCase):\n def test_answers_non_nested(self):\n infile = data_dir / \"match.fa\"\n aseq = PrgBuilder(infile)\n self.assertEqual(aseq.prg, \"ACGTGTTTTGTAACTGTGCCACACTCTCGAGACTGCATATGTGTC\")\n\n infile = data_dir / \"nonmatch.fa\"\n aseq = PrgBuilder(infile)\n self.assertEqual(aseq.prg, \" 5 AAACGTGGTT 6 CCCCCCCCCC 5 \")\n\n infile = data_dir / \"match.nonmatch.fa\"\n aseq = PrgBuilder(infile)\n self.assertEqual(aseq.prg, \"AAACG 5 TGGTT 6 CCCCC 5 \")\n\n infile = data_dir / \"nonmatch.match.fa\"\n aseq = PrgBuilder(infile)\n self.assertEqual(aseq.prg, \" 5 AAACGT 6 CCCCCC 5 GGTT\")\n\n infile = data_dir / \"match.nonmatch.match.fa\"\n aseq = PrgBuilder(infile)\n self.assertEqual(aseq.prg, \"AAACG 5 T 6 C 5 GGTT\")\n\n infile = data_dir / \"shortmatch.nonmatch.match.fa\"\n aseq = PrgBuilder(infile)\n self.assertEqual(aseq.prg, \" 5 AAACGT 6 ATTTTC 5 GGTT\")\n\n infile = data_dir / \"match.nonmatch.shortmatch.fa\"\n aseq = PrgBuilder(infile)\n self.assertEqual(aseq.prg, \"AAAC 5 GTGGTT 6 CCCCCT 5 \")\n\n infile = data_dir / \"match.staggereddash.fa\"\n aseq = PrgBuilder(infile)\n self.assertEqual(aseq.prg, \"AAACGTGGTT\")\n\n infile = data_dir / \"contains_n.fa\"\n aseq = PrgBuilder(infile)\n self.assertEqual(aseq.prg, \"AAACG 5 T 6 C 5 GGTT\")\n\n infile = data_dir / \"contains_RYKMSW.fa\"\n aseq = PrgBuilder(infile)\n self.assertEqual(aseq.prg, \"AAACG 5 T 6 C 5 GGTT\")\n\n infile = data_dir / \"contains_n_and_RYKMSW.fa\"\n aseq = PrgBuilder(infile)\n self.assertEqual(aseq.prg, \"AAACG 5 T 6 C 5 GGTT\")\n\n infile = data_dir / \"contains_n_and_RYKMSW_no_variants.fa\"\n aseq = PrgBuilder(infile)\n self.assertEqual(aseq.prg, \"AAACGTGGTT\")\n\n # with pytest.raises(Exception):\n # aseq = AlignedSeq(\"test/fails.fa\")\n\n def test_nested_snp_backgrounds(self):\n infile = data_dir / \"nested_snps_seq_backgrounds.fa\"\n aseq = PrgBuilder(infile, min_match_length=3)\n self.assertEqual(\n aseq.prg, \" 5 AAAA 7 T 8 C 7 AAAAAA 6 CCCC 9 T 10 G 9 CCCCCC 5 \"\n )\n\n def 
test_nested_snps_under_del(self):\n infile = data_dir / \"nested_snps_deletion.fa\"\n aseq = PrgBuilder(infile, min_match_length=1)\n self.assertEqual(aseq.prg, \"A 5 AA 7 C 8 T 7 AAAA 9 T 10 G 9 AA 6 A 5 AA\")\n","sub_path":"tests/from_msa/test_prg_builder.py","file_name":"test_prg_builder.py","file_ext":"py","file_size_in_byte":7470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"84598964","text":"from collections import deque\r\nclass Solution:\r\n def levelOrder(self, root: 'Node'):\r\n if not root:\r\n return []\r\n queue,result = deque([root]),[]\r\n while queue:\r\n size,tmp = len(queue),[]\r\n for _ in range(size):\r\n node = queue.popleft()\r\n tmp.append(node.val)\r\n if node.children:\r\n for child in node.children:\r\n queue.append(child)\r\n if tmp:\r\n result.append(tmp)\r\n return result","sub_path":"429_n-ary_tree_level_order_traversal.py","file_name":"429_n-ary_tree_level_order_traversal.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"285741117","text":"import quantecon as qe\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sympy import *\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n###############################################################################\n#\n# Question 1\n#\n\n'''\nCompute and plot the stationary distribution of the matrix using one of the\nmethods in quantecon's MarkovChain object, combined with matplotlib.\n'''\n###############################################################################\n\nP = [[0.222, 0.222, 0.215, 0.187, 0.081, 0.038, 0.029, 0.006],\n [0.221, 0.220, 0.215, 0.188, 0.082, 0.039, 0.029, 0.006],\n [0.207, 0.209, 0.210, 0.194, 0.090, 0.046, 0.036, 0.008],\n [0.198, 0.201, 0.207, 0.198, 0.095, 0.052, 0.040, 0.009],\n [0.175, 0.178, 0.197, 0.207, 0.110, 0.067, 0.054, 0.012],\n [0.182, 0.184, 0.200, 0.205, 0.106, 0.062, 0.050, 0.011],\n [0.123, 0.125, 0.166, 0.216, 0.141, 0.114, 0.094, 0.021],\n [0.084, 0.084, 0.142, 0.228, 0.170, 0.143, 0.121, 0.028]]\n\nmc = qe.MarkovChain(P, (\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\"))\nstationary_dist_p = mc.stationary_distributions\nprint(stationary_dist_p)\nplt.plot(list(range(0, len(P[0]))),stationary_dist_p[0], color = 'r')\nplt.scatter(list(range(0, len(P[0]))),stationary_dist_p[0])\nplt.title(r\"Stationary Distribution of $P$\")\nplt.ylabel(\"Probability\")\nplt.xlabel(\"Period\")\nplt.show()\n\n\n\n###############################################################################\n#\n# Question 1\n#\n\n'''\n\nExercise 2\nThis Markov chain is asymptotically stationary and ergodic, which means that,\nfor any sample path $\\{X_t\\}$ generated according to $P$, we have\n\n$$ \\lim_{n \\to \\infty} \\frac{1}{n} \\sum_{t=1}^n \\mathbb 1\\{X_t = j\\} = \\psi(j) $$\nwhere $\\psi$ is the stationary distribution and $j$ is an integer between 0 and\n7 (the set of possible states).\n\nUse this fact to compute an approximation to the stationary distribution $\\psi$.\nConfirm visually that your result is similar to the stationary distribution you\ncomputed in exercise 1.\n\nYou can make use of the simulate method in MarkovChain.\n'''\n###############################################################################\n\nsimulation = qe.MarkovChain(P).simulate_indices(ts_length = 1000, random_state = 1234)\n'''\nstate = np.zeros(len(P[0]))\n\nfor i in range(0, len(state)):\n states = simulation == i\n state[i] = 
np.sum(states)\n'''\n\n# sort_index() keeps the states in 0-7 order so the frequencies line up with the x-axis\nstate_count = pd.Series(simulation).value_counts().sort_index()/1000\n\nplt.plot(list(range(0, len(P[0]))), state_count, color = 'g', label = 'Simulation')\nplt.scatter(list(range(0, len(P[0]))),state_count, color = 'g')\nplt.plot(list(range(0, len(P[0]))),stationary_dist_p[0], ':',color = 'r', label = 'Actual')\nplt.scatter(list(range(0, len(P[0]))),stationary_dist_p[0],color = 'r')\nplt.title(r\"Simulation vs Actual Stationary Distributions of $P$\")\nplt.ylabel(\"Probability\")\nplt.xlabel(\"State\")\nplt.legend()\nplt.show()\n\n\n###############################################################################\n#\n# Question 3\n#\n\n'''\nExercise 3\nErgodicity also implies that, if we simulate a large number of paths and then\nlook at the cross section at some $T$, where $T$ is suitably large, then the\nempirical distribution should be close to the stationary distribution.\n\nConfirm this by simulation and visual inspection, as above.\n\n(In this context, the empirical distribution of a sample is the fraction of\nobservations that take value $j$ for each $j$ in $0, \\ldots, 7$.)\n'''\n################################################################################\n\n#simulate a large number of paths:\npaths = np.zeros(1000)\n\n#T is suitably large\nT = 1000\n\nfor i in range(len(paths)):\n    T_sim = qe.MarkovChain(P).simulate_indices(ts_length = T)\n    paths[i] = T_sim[-1]\n\n# sort_index() again aligns the empirical distribution with states 0-7\ncounts = pd.Series(paths).value_counts().sort_index()/1000\n\n\nplt.figure(figsize = (10,10))\nplt.plot(list(range(0, len(P[0]))), counts, '--', color = 'b', label = 'Ex #3')\nplt.scatter(list(range(0, len(P[0]))),counts, color = 'b')\nplt.plot(list(range(0, len(P[0]))), state_count,':', color = 'g', label = 'Ex #2')\nplt.scatter(list(range(0, len(P[0]))),state_count, color = 'g')\nplt.plot(list(range(0, len(P[0]))),stationary_dist_p[0], color = 'r', label = 'Ex #1')\nplt.scatter(list(range(0, len(P[0]))),stationary_dist_p[0],color = 'r')\nplt.title(r\"Simulation vs Actual Stationary Distributions of $P$\")\nplt.ylabel(\"Probability\")\nplt.xlabel(\"State\")\nplt.legend()\nplt.show()\n","sub_path":"ProbSets/Econ/Week2/week2_pset3.py","file_name":"week2_pset3.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"289499616","text":"import os\ndef deleteTree(path):\n\tif not os.path.isdir(path):\n\t\treturn\n\tfor name in os.listdir(path):\n\t\tfullName = os.path.join(path, name)\n\t\tif os.path.isdir(fullName):\n\t\t\tdeleteTree(fullName)\n\t\telse:\n\t\t\tos.remove(fullName)\n\tos.rmdir(path)\n","sub_path":"src1/out/deleteTree.py","file_name":"deleteTree.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"244397207","text":"def solution(numbers):\n    answer = []\n    stack = []\n\n    # scan from the back of the list\n    for i in range(len(numbers)-1,-1,-1):\n        # check whether the stack still holds a value larger than the current one;\n        # pop everything that is smaller or equal\n        while stack and numbers[i] >= stack[-1]:\n            stack.pop()\n\n        # nothing larger remains on the stack\n        if len(stack) == 0:\n            answer.append(-1)\n        else:\n            answer.append(stack[-1])\n        stack.append(numbers[i])\n        # whatever is left on the stack is strictly larger than the current value\n\n    return answer[::-1]\n\n\n","sub_path":"프로그래머스/unrated/154539. 
뒤에 있는 큰 수 찾기/뒤에 있는 큰 수 찾기.py","file_name":"뒤에 있는 큰 수 찾기.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"115526704","text":"from flask import Flask, render_template,redirect,url_for,request\nfrom flask import session\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/counter\",methods=[\"GET\",\"POST\"])\ndef counter():\n if \"counter\" not in session:\n session['counter']=0\n counter = 0\n else:\n counter = session['counter']\n \n if request.method==\"GET\":\n return render_template(\"counter.html\",counter=counter,s=session)\n else:\n b = request.form['b']\n if b=='+':\n counter = session['counter']\n counter = counter + 1\n session['counter'] = counter\n elif b=='-':\n counter =session['counter']\n counter = counter - 1\n session['counter']=counter\n else:\n session.pop('counter',None)\n return redirect(url_for(\"counter\"))\n\napp.secret_key=\"don't put this on github\"\nif __name__ == \"__main__\":\n app.debug = True\n app.run(host=\"0.0.0.0\", port=8000)\n\n","sub_path":"flask-session/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"243848048","text":"import requests\r\nimport sqlite3\r\nfrom bs4 import BeautifulSoup\r\n\r\n# author: Hope Foreman\r\n# date: 11, Oct 2021\r\n\r\n# setting up the database\r\ndatabase_name = 'golfers.db'\r\nconn = sqlite3.connect(database_name)\r\nc = conn.cursor()\r\n\r\n# setting up connection with beautiful soup and web page\r\nurl = 'http://www.owgr.com/ranking'\r\nr = requests.get(url)\r\n# checks to see if the request is 200/passes\r\nprint(r.status_code)\r\n\r\n# using beautifulsoup to find the table\r\nsoup = BeautifulSoup(r.text, 'lxml')\r\ntable = soup.find('table')\r\n\r\nranking = []\r\n\r\n\r\ndef create_golfer_table():\r\n c.execute(\"\"\"CREATE TABLE golfers (\r\n this_week INTEGER,\r\n last_week INTEGER,\r\n end_2020 INTEGER,\r\n name TEXT,\r\n average_points REAL,\r\n total_points REAL,\r\n events_played INTEGER,\r\n points_lost REAL,\r\n points_gained REAL\r\n )\"\"\")\r\n\r\n\r\n# method for indexing the row\r\ndef find_table_row(index):\r\n for row in table.find_all('tr')[index]:\r\n ranking.append(row.getText().strip())\r\n while \"\" in ranking:\r\n ranking.remove(\"\")\r\n ranking.pop()\r\n #print(ranking)\r\n\r\n\r\n# method to print record\r\ndef print_record(record):\r\n print(record[0], record[1], record[2], record[3],\r\n record[4], record[5], record[6], record[7], record[8])\r\n\r\n\r\n# method to insert a persons row stats in sqlite\r\n# param takes in the row's number\r\ndef inserting_sql(record_number):\r\n sql = \"\"\"INSERT INTO golfers \r\n (this_week, last_week, end_2020, name, average_points, total_points, events_played, points_lost, points_gained) \r\n VALUES(?,?,?,?,?,?,?,?,?);\"\"\"\r\n c.execute(sql, record_number)\r\n\r\n\r\n# calling the method to create sql table in database\r\ncreate_golfer_table()\r\n\r\n# gets and stores the top 100 rows from table into database\r\nfor i in range(100):\r\n find_table_row(i+1)\r\n print_record(ranking)\r\n inserting_sql(ranking)\r\n ranking = []\r\n\r\n\r\nconn.commit()\r\nconn.close()\r\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
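The golfers scraper above writes the top-100 ranking rows into golfers.db but never reads them back. As a quick sanity check, here is a minimal sketch of querying the populated golfers table; it assumes only the schema created by create_golfer_table() above, and the ORDER BY column and row limit are illustrative choices, not part of the original script.

import sqlite3

# Assumes golfers.db was already populated by the scraper above.
conn = sqlite3.connect('golfers.db')
c = conn.cursor()

# Illustrative query: the five golfers ranked highest this week.
c.execute("SELECT this_week, name, average_points FROM golfers "
          "ORDER BY this_week ASC LIMIT 5")
for this_week, name, average_points in c.fetchall():
    print(this_week, name, average_points)

conn.close()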
+{"seq_id":"536691596","text":"__author__ = \"Takashi Higashimura \"\n\nfrom unicon.plugins.iosxr.service_patterns import IOSXRReloadPatterns\n\n\nclass IOSXRSpitfireReloadPatterns(IOSXRReloadPatterns):\n def __init__(self):\n super().__init__()\n self.system_config_completed = r\"^(.*?)SYSTEM CONFIGURATION COMPLETED\"\n self.reloading_node = r\"^(.*?)Reloading node .*\"","sub_path":"src/unicon/plugins/iosxr/spitfire/service_patterns.py","file_name":"service_patterns.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"23026154","text":"from django.urls import path\nfrom django.conf.urls import url\nfrom Bigflow.FA import views\nurlpatterns = [\n # FA Pages\n #url(r'^(?P[\\w-]+)/$', views.FA_Template , name='FA_Template'),\n path('fa_summary/', views.fa_summary, name=\"fa_summary\"),\n path('fa_assetadd/', views.fa_assetadd, name=\"fa_assetadd\"),\n path('fa_assetchecker/', views.fa_assetchecker, name=\"fa_assetchecker\"),\n path('fa_capdate_change/', views.fa_capdate_change, name=\"fa_capdate_change\"),\n path('fa_capdate_changeplus/', views.fa_capdate_changeplus, name=\"fa_capdate_changeplus\"),\n path('cp_datechecker/', views.fa_capdate_checker, name=\"fa_capdate_checker\"),\n path('fa_asset_parentchild/', views.fa_asset_parentchild, name=\"fa_asset_parentchild\"),\n path('fa_asset_parent_plus/', views.fa_asset_parent_plus, name=\"fa_asset_parent_plus\"),\n path('fa_asset_parentchildcheck/', views.fa_asset_parentchildcheck, name=\"fa_asset_parentchildcheck\"),\n path('fa_writeoff/', views.fa_writeoff, name=\"fa_writeoff\"),\n path('fa_writeoffplus/', views.fa_writeoffplus, name=\"fa_writeoffplus\"),\n path('fa_writeoff_check/', views.fa_writeoff_check, name=\"fa_writeoff_check\"),\n path('fa_impairment/', views.fa_impairment, name=\"fa_impairment\"),\n path('fa_impairmentplus/', views.fa_impairmentplus, name=\"fa_impairmentplus\"),\n path('fa_impairment_check/', views.fa_impairment_check, name=\"fa_impairment_check\"),\n path('fa_asset_merge/', views.fa_asset_merge, name=\"fa_asset_merge\"),\n path('fa_asset_merge_plus/', views.fa_asset_merge_plus, name=\"fa_asset_merge_plus\"),\n path('fa_asset_merge_checker/', views.fa_asset_merge_checker, name=\"fa_asset_merge_checker\"),\n path('fa_asset_split/', views.fa_asset_split, name=\"fa_asset_split\"),\n path('fa_asset_split_plus/', views.fa_asset_split_plus, name=\"fa_asset_split_plus\"),\n path('fa_asset_split_checker/', views.fa_asset_split_checker, name=\"fa_asset_split_checker\"),\n path('fa_asset_catgry/', views.fa_asset_catgry, name=\"fa_asset_catgry\"),\n path('fa_asset_catgry_plus/', views.fa_asset_catgry_plus, name=\"fa_asset_catgry_plus\"),\n path('fa_asset_catgrychecker/', views.fa_asset_catgrychecker, name=\"fa_asset_catgrychecker\"),\n path('fa_asset_sale/', views.fa_asset_sale, name=\"fa_asset_sale\"),\n path('fa_asset_saleplus/', views.fa_asset_saleplus, name=\"fa_asset_saleplus\"),\n path('fa_asset_sale_checker/', views.fa_asset_sale_checker, name=\"fa_asset_sale_checker\"),\n path('fa_value_reduction/', views.fa_value_reduction, name=\"fa_value_reduction\"),\n path('fa_value_reduction_plus/', views.fa_value_reduction_plus, name=\"fa_value_reduction_plus\"),\n path('fa_reduction_checker/', views.fa_reduction_checker, name=\"fa_reduction_checker\"),\n path('fa_transfer_maker/', views.fa_transfer_maker, name=\"fa_transfer_maker\"),\n path('fa_transferplus/', views.fa_transferplus, name=\"fa_transferplus\"),\n 
path('fa_transfer_checker/', views.fa_transfer_checker, name=\"fa_transfer_checker\"),\n path('fa_depreciation_calc/', views.fa_depreciation_calc, name=\"fa_depreciation_calc\"),\n path('fa_posttocbs/', views.fa_posttocbs, name=\"fa_posttocbs\"),\n path('fa_cwip_checker/', views.fa_cwip_checker, name=\"fa_cwip_checker\"),\n path('fa_query_summary/',views.fa_query_summary,name=\"fa_query_summary\"),\n path('fa_financial_year/', views.fa_financial_year, name=\"fa_financial_year\"),\n path('fa_financial_year_plus/', views.fa_financial_year_plus, name=\"fa_financial_year_plus\"),\n path('fa_financial_year_checker/', views.fa_financial_year_checker, name=\"fa_financial_year_checker\"),\n path('fa_physic_verify/', views.fa_physic_verify, name=\"fa_physic_verify\"),\n path('fa_physic_verify_plus/', views.fa_physic_verify_plus, name=\"fa_physic_verify_plus\"),\n path('fa_physic_verify_check/', views.fa_physic_verify_check, name=\"fa_physic_verify_check\"),\n\n #cat Master\n path('fa_asset_catgry_master_plus/', views.fa_asset_catgry_master_plus, name=\"fa_asset_catgry_master_plus\"),\n path('fa_asset_catgry_summary/', views.fa_asset_catgry_summary, name=\"fa_asset_catgry_summary\"),\n path('fa_mst_location/', views.fa_mst_location, name=\"fa_mst_location\"),\n path('fa_image_popup/', views.fa_image_popup, name=\"fa_image_popup\"),\n path('get_entity_branch/',views.get_entity_branch,name=\"get_entity_branch\"),\n\n # FA Fuctions\n path('asset_details/', views.asset_details, name=\"asset_details\"),\n path('drop_data/', views.drop_data, name=\"drop_data\"),\n path('generate_saletemplate/', views.generate_saletemplate, name=\"generate_saletemplate\"),\n path('get_branch/', views.get_branch, name=\"get_branch\"),\n path('imageconvert_base64/', views.imageconvert_base64, name=\"imageconvert\"),\n path('save_asset/', views.save_asset, name=\"save_asset\"),\n path('branch_details/', views.branch_details, name=\"branch_details\"),\n path('save_location/', views.save_location, name=\"save_location\"),\n path('writeoff_summary/', views.writeoff_summary, name=\"writeoff_summary\"),\n path('asset_checker/', views.asset_checker, name=\"asset_checker\"),\n path('fa_category/', views.fa_category, name=\"fa_category\"),\n path('fa_category_get/', views.fa_category_get, name=\"fa_category_get\"),\n path('drop_branch/', views.drop_branch, name=\"drop_branch\"),\n path('fa_assetadd/', views.fa_assetadd, name=\"fa_assetadd\"),\n # path('imageconvert/', views.imageconvert, name=\"imageconvert\"),\n path('sale_make/', views.sale_make, name=\"sale_make\"),\n path('dep_calculation/', views.dep_calculation, name=\"dep_calculation\"),\n path('fin_year/', views.fin_year, name=\"fin_year\"),\n path('cust_data/', views.cust_data, name=\"cust_data\"),\n path('dep_ratedata/', views.dep_ratedata, name=\"dep_ratedata\"),\n path('glno_data/', views.glno_data, name=\"glno_data\"),\n path('dep_data_get/', views.dep_data_get, name=\"dep_data_get\"),\n path('cwip_group_get/', views.cwip_group_get, name=\"cwip_group_get\"),\n path('data_cc/', views.data_cc, name=\"data_cc\"),\n path('data_bs/', views.data_bs, name=\"data_bs\"),\n path('get_state_drop/', views.get_state_drop, name=\"get_state_drop\"),\n path('posttocbs_set/',views.posttocbs_set,name=\"posttocbs_set\"),\n path('drop_data/', views.drop_data, name=\"drop_data\"),\n path('save_asset/', views.save_asset, name=\"save_asset\"),\n\n path('commondata_get/', views.commondata_get, name=\"commondata_get\"),\n path('repost/', views.repost_set, name=\"repost\"),\n\n\n #Main CSS\n 
path('css_main_trail/', views.css_main_trail, name=\"css_main_trail\"),\n\n ## excel\n # path('dpforecast_getexcel/', views.dpforecast_getexcel, name='dpforecast_getexcel'),\n # path('dpregular_getexcel/', views.dpregular_getexcel, name='dpregular_getexcel'),\n path('check_file_exists/',views.check_file_exists,name='check_file_exists'),\n path('dpregular_getexcel/', views.fa_excel_s3, name='dpregular_getexcel'), #Directly download files\n path('dpregular_getexcel_test/', views.fa_excel, name='dpregular_getexcel_test'),\n path('dpforecast_getexcel_test/', views.dpforecast_getexcel, name='dpforecast_getexcel_test'),\n path('check_file_exists_forecast/',views.check_file_exists_forecast,name='check_file_exists_forecast'),\n path('dpforecast_getexcel/', views.fa_excel_s3_forecast, name='dpforecast_getexcel'),\n]\n","sub_path":"Bigflow/FA/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":7293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"146690313","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('GAOGDS', '0005_auto_20170215_1549'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ServerGroups',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=64, verbose_name=b'\\xe7\\xbb\\x84\\xe5\\x90\\x8d')),\n ],\n ),\n migrations.AlterField(\n model_name='server',\n name='groups',\n field=models.ForeignKey(default=2, to='GAOGDS.ServerGroups'),\n preserve_default=False,\n ),\n ]\n","sub_path":"GAOGDS/migrations/0006_auto_20170216_1209.py","file_name":"0006_auto_20170216_1209.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"98838399","text":"#\n# You can raise your own exceptions in your code. 
Exceptions are raised with a raise statement.\n# In code, a raise statement consists of the following\n# The raise keyword\n# A call to the Exception() function\n# A string with a helpful error message passed to the Exception() function\n\nnumber = 10\n\ntry:\n if number > 5:\n raise Exception('Number is greater than 5')\nexcept Exception as err:\n print('An exception happened: ' + str(err))","sub_path":"PythonEssentials/Debugging/Raising_Exceptions.py","file_name":"Raising_Exceptions.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"230570889","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\n\nfrom matplotlib import pyplot as plt\n\nfrom src.alg.kruskal import kruskal\nfrom src.alg.prim import prim\nfrom src.ds.graph import Graph\nfrom src.viz import show_mst\n\n\ndef main():\n # parse arguments\n args = parse_arguments()\n\n # load the graph\n try:\n G = Graph.from_file(args.graph)\n except Exception:\n abort(\"Error reading graph file\")\n\n # run the algorithm\n if args.kruskal:\n mst = kruskal(G)\n elif args.prim:\n mst = prim(G)\n else:\n abort(\"No algorithm specified\")\n\n # display the result MST edges\n print(\"The resulting min-spanning-tree contains following edges:\")\n for (u, v) in mst:\n print(u, v)\n\n # plot if asked\n if args.show_graph:\n show_mst(G, mst)\n plt.show()\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\n description=(\"Find the minimum spanning tree of a connected weighted\"\n \"undirected graph\"),\n epilog=\"Author: Manish Munikar \")\n parser.add_argument(\"graph\", help=\"input graph file\")\n parser.add_argument(\n \"-k\", \"--kruskal\", action=\"store_true\",\n help=\"use Kruskal's algorithm\")\n parser.add_argument(\n \"-p\", \"--prim\", action=\"store_true\",\n help=\"use Prim's algorithm (default)\")\n parser.add_argument(\n \"-g\", \"--show-graph\", action=\"store_true\",\n help=\"display the result as a visual graph\")\n\n args = parser.parse_args()\n\n if args.kruskal and args.prim:\n abort(\"Please specify only one algorithm\")\n if not (args.kruskal or args.prim):\n args.prim = True\n\n return args\n\n\ndef abort(msg, code=1):\n print(msg, file=sys.stderr)\n sys.exit(code)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"484380963","text":"\n# coding: utf-8\n\n# In this kernel we do some exploratory data analysis of a kaggle dataset which explores customer churn for a telecoms company\n\n# In[111]:\n\n\nimport pandas as pd \nimport numpy as np \nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[112]:\n\n\ndf = pd.read_csv('WA_Fn-UseC_-Telco-Customer-Churn.csv')\ndf.head()\n\n\n# First, it makes sense to explore the columns and make sure they are understood and write down any initial thoughts. This was done outside of this notebook.\n# \n# __Customer ID:__\n# ID number for customer, not useful for analysis, unless it could be converted to some kind of ‘how long has this customer been with us metric,’ but we already have tenure. \n# \n# __Gender:__\n# Male or Female\n# \n# __SeniorCitizen:__\n# 0 or 1, is the customer a senior citizen. 
No info on age cutoff\n# \n# __Partner:__\n# Yes or no, assume refers to spouse or de-facto\n# \n# __Dependents:__\n# Yes or no, assume mainly refers to whether or not customer has children\n# \n# __Tenure:__\n# Integer value, refers to months as customer\n# \n# __PhoneService:__\n# Yes or no, does the customer have phone service\n# \n# __MultipleLines:__\n# Yes, no or No phone service. May make sense to classify last two as same thing\n# \n# __InternetService:__\n# DSL, Fiber Optic or No, refers to what type of internet service the person has\n# \n# __OnlineSecurity:__\n# Yes, no or No internet service. May make sense to classify last two as same thing\n# \n# __OnlineBackup:__\n# Yes, no or No internet service. May make sense to classify last two as same thing\n# \n# __DeviceProtection:__\n# Yes, no or No internet service. May make sense to classify last two as same thing\n# \n# __TechSupport:__\n# Yes, no or No internet service. May make sense to classify last two as same thing\n# \n# __StreamingTV:__\n# Yes, no or No internet service. May make sense to classify last two as same thing\n# \n# __StreamingMovies:__\n# Yes, no or No internet service. May make sense to classify last two as same thing\n# \n# __Contract:__\n# Month-to-month, One year, Two year. Specifies contract term\n# \n# __PaperlessBilling:__\n# Yes or no. Refers to whether the customer has paperless billing\n# \n# __PaymentMethod:__\n# Electronic check, Mailed check, Bank transfer (automatic), Credit card (automatic)\n# \n# __MonthlyCharges:__\n# Various float values, how much the customer pays monthly\n# \n# __TotalCharges:__\n# Various float values, how much the customer pays in total\n# \n# __Churn:__\n# Yes or no, whether the customer churns\n# \n# \n# Initial thoughts - customer ID will not be useful for analysis, but we should check to make sure that there aren’t any duplicates. There is little cleaning to do in this dataset, except that some of the category names can be shortened (remove ‘...service’ and ‘...automatic’). \n# \n# It may make sense to classify things like 'no internet service' and 'no' as the same thing but there is a real difference and they could probably be left as they are.\n# \n# Initially I was confused about the relationship between __PaperlessBilling__ and __PaymentMethod__, and how a person could not have paperless billing but still pay via bank transfer. This person would receive paper bills in the mail but still pay online, no relationship should be inferred between the two. \n\n# In[113]:\n\n\n# check for missing data:\ndf.isnull().sum()\n\n\n# This would of course be fairly atypical but helps us proceed quicker. 
The next step is to check the customer ID column for duplicates:\n\n# In[114]:\n\n\ndf['customerID'].duplicated().sum()\n\n\n# So we know that we aren't dealing with multiple instances of the same customer, and we don't need this column for analysis, so we can delete it\n\n# In[115]:\n\n\ndf = df.drop(['customerID'], axis = 1)\ndf.head()\n\n\n# A fairly minor cleaning job, which could actually have much larger effects in a big dataset, would be removing 'service' from columns where it occurs\n\n# In[116]:\n\n\ndf['MultipleLines'] = df.MultipleLines.str.replace(\" service\", \"\")\ndf['OnlineSecurity'] = df.OnlineSecurity.str.replace(\" service\", \"\")\ndf['OnlineBackup'] = df.OnlineBackup.str.replace(\" service\", \"\")\ndf['DeviceProtection'] = df.DeviceProtection.str.replace(\" service\", \"\")\ndf['TechSupport'] = df.TechSupport.str.replace(\" service\", \"\")\ndf['StreamingTV'] = df.StreamingTV.str.replace(\" service\", \"\")\ndf['StreamingMovies'] = df.StreamingMovies.str.replace(\" service\", \"\")\n\n\n# In[117]:\n\n\ndf.head(10)\n\n\n# It probably pays to investigate the __PaymentMethod__ column a little to see whether we need the '(automatic)' designation\n\n# In[118]:\n\n\ndf['PaymentMethod'].value_counts()\n\n\n# It seems like these distiguishing features could be useful so we will leave them as they are for now. For the sake of consistency, lets also make the SeniorCitizen column yes or no instead of 0 or 1\n\n# In[119]:\n\n\ndf['SeniorCitizen'].value_counts()\n\n\n# In[120]:\n\n\ndf['SeniorCitizen'] = df['SeniorCitizen'].astype(str)\ndf['SeniorCitizen'] = df['SeniorCitizen'].map({'0': 'No', '1': 'Yes'})\ndf.head()\n\n\n# In[121]:\n\n\ndf.dtypes\n\n\n# For some reason __TotalCharges__ is stored as a string, this is easily fixed:\n\n# In[122]:\n\n\ndf['TotalCharges'] = df['TotalCharges'].astype(float)\ndf.dtypes\n\n\n# Something in that column must not be in valid float form. Unfortunately, since there are 7000+ customers with fairly distinct values, we cant inspect them by looking at value counts\n\n# In[123]:\n\n\nnum_list = []\nbroken_list = []\n\nfor index,row in enumerate(df['TotalCharges']):\n try:\n num_list.append(float(row))\n except ValueError:\n print('Line {i} is corrupt'.format(i = index))\n broken_list.append(row)\n\n\n# In[124]:\n\n\nbroken_list\n\n\n# In[125]:\n\n\ndf.shape\n\n\n# So we have a bunch of empty strings. One thing we could do here is copy across the values from MonthlyCharges, this seems logical but actually only occurs less than 10% of the time. The best thing to do is probably delete the rows, which only make up a tiny fraction of the dataset\n\n# In[126]:\n\n\ndf['TotalCharges'].replace(' ', np.nan, inplace=True)\n\n\n# In[127]:\n\n\ndf['TotalCharges'] = df['TotalCharges'].astype(float)\n\n\n# In[128]:\n\n\ndf.dropna(subset=['TotalCharges'], inplace=True)\ndf.shape\n\n\n# In[129]:\n\n\ndf.dtypes\n\n\n# Now for some exploratory analysis. 
Since most of the columns are categoricals, we can produce a series of bar plots fairly quickly\n\n# In[130]:\n\n\nbars = df.drop(labels=['tenure', 'MonthlyCharges', 'TotalCharges'], axis=1)\nfor col in bars:\n plt.figure(figsize=(10,5));\n plt.title(col);\n sns.countplot(df[col], hue=df[\"Churn\"], );\n plt.show();\n\n\n# From a visual inspection, it would appear that __Dependents__, __InternetService__, __OnlineSecurity__, __Contract__, __OnlineBackup__, __DeviceProtection__, __TechSupport__, __Contract__, __PaymentMethod__ and __PaperlessBilling__ will all be reasonable predictors\n\n# Now to look at float and numerical variables:\n\n# In[131]:\n\n\nplt.figure(figsize=(20,5))\nsns.countplot(df[\"tenure\"])\n\n\n# This is mostly a fairly even distribution. The first thing to note is that the biggest groups of customers are those new to the company (1 - 4 months) and those who have been with the company for approximately 72 months. In this case I would guess that 72 months is either the point at which they stop counting or the maximum possible tenure (i.e. start of company). Its probably worth exploring this a bit more and seeing what churn looks like when we categorise customers into bins of approximately 6 months\n\n# In[132]:\n\n\ndef tenure_bins(month):\n if month >= 1 and month < 7:\n return '1-6'\n elif month >= 7 and month < 13:\n return '7-12'\n elif month >= 13 and month < 19:\n return '13-18'\n elif month >= 19 and month < 25:\n return '19-24'\n elif month >= 25 and month < 31:\n return '25-30'\n elif month >= 31 and month < 37:\n return '31-36'\n elif month >= 37 and month < 43:\n return '37-42'\n elif month >= 43 and month < 49:\n return '43-48'\n elif month >= 49 and month < 55:\n return '49-54'\n elif month >= 55 and month < 61:\n return '55-60'\n elif month >= 61 and month < 67:\n return '61-66'\n elif month >= 67 and month < 73:\n return '67-72+'\n else:\n return 'out of range'\n \ntenures = df['tenure']\ndf['tenure_labels'] = df['tenure'].apply(tenure_bins)\ndf.head()\n\n\n# In[133]:\n\n\nplt.figure(figsize=(20,5))\nsns.countplot(df[\"tenure_labels\"])\n\n\n# Lets have a look at how likely these groups are to churn\n\n# In[134]:\n\n\nplt.figure(figsize=(10,5))\nsns.countplot(df['tenure_labels'], hue=df[\"Churn\"]);\nplt.show()\n\n\n# This clearly illustrates to us that likelihood is highest at the beginning of the tenure. In fact, it is actually very low across the board after 18 months\n\n# Now to look at monthly charges:\n\n# In[135]:\n\n\nsns.distplot(df[\"MonthlyCharges\"])\n\n\n# Most are at the lower end of the scale. Is total charges similar?\n\n# In[136]:\n\n\nsns.distplot(df[\"TotalCharges\"])\n\n\n# Very much so. We would probably want to do some kind of (log) transformation before using these, but for now lets see if we can spot any trends\n\n# In[137]:\n\n\ndef monthly_bins(bill):\n if bill >= 0 and bill < 26:\n return '0-25'\n elif bill >= 26 and bill < 51:\n return '26-50'\n elif bill >= 51 and bill < 76:\n return '51-75'\n elif bill >= 76 and bill < 101:\n return '76-100'\n elif bill >= 101 and bill < 126:\n return '101-125'\n else:\n return 'out of range'\n \nbills = df['MonthlyCharges']\ndf['MonthlyCharge_labels'] = df['MonthlyCharges'].apply(monthly_bins)\ndf.head()\n\n\n# In[138]:\n\n\nplt.figure(figsize=(10,5))\nsns.countplot(df['MonthlyCharge_labels'], hue=df[\"Churn\"]);\nplt.show()\n\n\n# So it seems like customers at the very low range are most unlikely to churn, most likely are in the mid to upper range. 
Lets try the same thing for total charges:\n\n# In[139]:\n\n\ndef total_bins(bill):\n if bill >= 0 and bill < 501:\n return '0-500'\n elif bill >= 501 and bill < 1001:\n return '501-1000'\n elif bill >= 1001 and bill < 1501:\n return '1001-1500'\n elif bill >= 1501 and bill < 2001:\n return '1501-2000'\n elif bill >= 2001 and bill < 2501:\n return '2001-2500'\n elif bill >= 2501 and bill < 3001:\n return '2501-3000'\n elif bill >= 3001 and bill < 3501:\n return '3001-3500'\n elif bill >= 3501 and bill < 4001:\n return '3501-4000'\n elif bill >= 4001 and bill < 4501:\n return '4001-4500'\n elif bill >= 4501 and bill < 5001:\n return '4501-5000'\n elif bill >= 5001 and bill < 5501:\n return '5001-5500'\n elif bill >= 5501 and bill < 6001:\n return '5501-6000'\n elif bill >= 6001 and bill < 6501:\n return '6001-6500'\n elif bill >= 6501 and bill < 7001:\n return '6501-7000'\n elif bill >= 7001 and bill < 7501:\n return '7001-7500'\n elif bill >= 7501 and bill < 8001:\n return '7501-8000'\n elif bill >= 8001 and bill < 8501:\n return '8001-8500'\n elif bill >= 8501 and bill < 9001:\n return '8501-9000'\n elif bill >= 9001 and bill < 9501:\n return '9001-9500'\n elif bill >= 9501 and bill < 10000:\n return '9501-10000'\n else:\n return '10000+'\n \nbills = df['TotalCharges']\ndf['TotalCharge_labels'] = df['TotalCharges'].apply(total_bins)\ndf.head()\n\n\n# In[140]:\n\n\nplt.figure(figsize=(20,5))\nsns.countplot(df['TotalCharge_labels'], hue=df[\"Churn\"]);\nplt.show()\n\n\n# Churn looks similar across the board, except in the 0-500 bracket where it is relatively high\n\n# As far as data exploration goes, this is the end of the line. In terms of getting variables ready for ML, categorical variables would have to be encoded and numerical variables may need to be transformed, depending on the algorithm chosen. Same would go for scaling of the numerical variables, but it depends on the method. Once features are encoded, a feature adding the internet addons could be added, which may also give us further information on what type of people churn. 
But here are some takeaways from this short analysis:\n# \n# * There is no single feature which is a really obvious predictor for churning\n# * Not having internet service makes a customer more unlikely to churn\n# * Being on a contract, as expected, makes a customer more unlikely to churn\n# * Paying by electronic check makes a customer more likely to churn\n# * Being at the beginning of a contract, especially within the first 6 months, makes a customer much more likely to churn\n# * Customers in the 0-25 payment bracket are very unlikely to churn, the most likely bracket is 75-100\n# * The most likely bracket for churn in terms of yearly payments is 0-500\n\n# Once this dataset is ready to be analysed, good candidates for a ML model would be __logistic regression__, __decision trees__ or a __random forest__\n","sub_path":"Data Exploration.py","file_name":"Data Exploration.py","file_ext":"py","file_size_in_byte":12860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"227914294","text":"from google.appengine.ext import db\nfrom google.appengine.ext.search import SearchableModel\nfrom external.markdown2 import markdown\nfrom datetime import date\nfrom datetime import datetime\nimport re\nfrom google.appengine.api import memcache\nimport sys, logging\n\ndef archive_list():\n\tdata = memcache.get(\"archive_list\")\n\tif data is None:\n\t\tall = Post.all().filter('status =', 'Published').order('-posted_on')\n\t\tdata = {}\n\t\tfor p in all:\n\t\t\tdate = p.posted_on\n\t\t\tif date.year in data:\n\t\t\t\tdata[date.year]['count'] += 1\n\t\t\telse:\n\t\t\t\tdata[date.year] = {'count': 1, 'months' : {}}\n\t\t\tmonth = int(date.strftime('%m'))\n\t\t\tif month in data[date.year]['months']:\n\t\t\t\tdata[date.year]['months'][month][1] += 1\n\t\t\telse:\n\t\t\t\tdata[date.year]['months'][month] = [date.strftime('%B'), 1]\n\t\tmemcache.add(\"archive_list\", data, 60*60*365)\n\tlogging.warn(data)\n\treturn data\n\nclass Post(SearchableModel):\n\tunsearchable_properties = ['slug', 'html', 'status']\n\tcontent = db.TextProperty(required=True)\n\tposted_on = db.DateTimeProperty(required=True, auto_now_add=True)\n\tmodified_on = db.DateTimeProperty(auto_now=True)\n\ttitle = db.StringProperty(required=True)\n\tslug = db.StringProperty()\n\thtml = db.TextProperty()\n\tstatus = db.StringProperty()\n\ttags = db.StringListProperty()\n\t\n\tdef num_comments(self):\n\t\t\"\"\"\n\t\tGets the number of comment associated with this post\n\t\t\"\"\"\n\t\tfrom model.comment import Comment\n\t\treturn Comment.count_for(self)\n\t\n\tdef ordered_comments(self, dir=\"ASC\"):\n\t\t# this import cannot be in the header because\n\t\t# it causes a circular import and blows up\n\t\tfrom model.comment import Comment\n\t\tcomments = Comment.all().filter(\"post =\", self)\n\t\tif dir == \"DESC\":\n\t\t\treturn comments.order(\"-posted_at\")\n\t\telse:\n\t\t\treturn comments.order(\"posted_at\")\n\n\tdef create_slug(self, slug):\n\t\t\"\"\"Creates a nice slug based on the title\n\t\t\n\t\tThis function is based on http://www.djangosnippets.org/snippets/29/\"\"\"\n\t\tif not slug:\n\t\t\taslug = self.title\n\t\telse:\n\t\t\taslug = slug\n\t\t\n\t\tremovelist = []; #[\"a\", \"an\", \"as\", \"at\", \"before\", \"but\", \"by\", \"for\",\"from\",\"is\", \"in\", \"into\", \"like\", \"of\", \"off\", \"on\", \"onto\",\"per\",\"since\", \"than\", \"the\", \"this\", \"that\", \"to\", \"up\", \"via\",\"with\"];\n\t\tfor a in removelist:\n\t\t\taslug = 
re.sub(r'\\b'+a+r'\\b','',aslug)\n\t\taslug = re.sub('[^\\w\\s-]', '', aslug).strip().lower()\n\t\taslug = re.sub('\\s+', '-', aslug)\n\t\tself.slug = aslug\n\t\n\tdef create_html(self):\n\t\t\"\"\"Process the input content as markdown \"\"\"\n\t\tself.html = markdown(self.content)\n\t\n\tdef put(self):\n\t\tself.create_slug(self.slug)\n\t\tself.process_tags()\n\t\tself.create_html()\n\t\tif not self.status:\n\t\t\tself.status = 'Draft'\n\t\tsuper(Post, self).put()\n\t\tmemcache.delete(\"archive_list\")\n\t\t\n\tdef process_tags(self):\n\t\t\"\"\" This ensures the tag count is correct and that all tags are set.\"\"\"\n\t\tpass\n\t\t\t\n\t\n\tdef get_archive(year=None, month=None, day=None):\n\t\ttoday = date.today()\n\t\t# by default only get this year\n\t\tstart_year = today.year\n\t\tstart_month = 1\n\t\tstart_day = 1\n\t\tend_year = today.year + 1\n\t\tend_month = 1\n\t\tend_day = 1\n\t\t\n\t\tif year:\n\t\t\tstart_year = int(year)\n\t\t\tend_year = int(year)\n\t\t\tif month:\n\t\t\t\tstart_month = int(month)\n\t\t\t\tend_month = int(month)\n\t\t\t\tif day:\n\t\t\t\t\tstart_day = int(day)\n\t\t\t\t\tend_day = int(day) + 1\n\t\t\t\telse:\n\t\t\t\t\tend_month = int(month) + 1\n\t\t\telse:\n\t\t\t\tend_year = int(year) + 1\n\t\t\n\t\t\n\t\tstart = datetime(start_year, start_month, start_day, 0, 0, 0)\n\t\ttry:\n\t\t\tend = datetime(end_year, end_month, end_day, 0, 0, 0)\n\t\texcept ValueError:\n\t\t\t# this is likely to happen at the end of the month \n\t\t\t# e.g Today is 2010/05/31 so end date is + 1 day\n\t\t\tif month < 12:\n\t\t\t\tend = datetime(end_year, end_month + 1, 1, 0, 0, 0)\n\t\t\telse:\n\t\t\t\tend = datetime(end_year + 1, 1, 1, 0, 0, 0)\n\t\treturn Post.all().filter('status =', 'Published').filter('posted_on >=', start).filter('posted_on <', end) \n\t\n\tget_archive = staticmethod(get_archive)\n\t\t\t\n","sub_path":"application/model/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"543883893","text":"# !/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\r\n\"\"\"\r\nstruct内置模块:\r\n pack(fmt, *args)\r\n unpack(fmt, string)\r\n\"\"\"\r\n\r\nimport struct\r\n\r\n# 将Python数据转换为二进制数据\r\ndata=struct.pack('>i2sh',1,b'Hi',2) # i:整数 2s: 2个字符的字符串 h:高位在前---big-endian\r\n# 保存到文件中\r\nwith open('data.bin',mode='wb') as fw:\r\n fw.write(data)\r\n\r\n# 从文件中读取二进制数据\r\nwith open('data.bin',mode='rb') as fr:\r\n data=fr.read()\r\n # 将二进制数据转换为Python数据\r\n data=struct.unpack('>i2sh',data)\r\n print(data)\r\n","sub_path":"08_应用主题/数据序列化与持久化/struct/example01.py","file_name":"example01.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"380730149","text":"# encoding: utf-8\r\nimport pandas as pd\r\nfrom sklearn.cross_validation import StratifiedShuffleSplit as SSS\r\n\r\n\"\"\"\r\n 一些协助预处理数据的函数\r\n 下面函数中的 df 都是 pandas 的 dataframe 变量\r\n\"\"\"\r\n\r\n\r\ndef get_numeric_fields(df):\r\n return df._get_numeric_data().columns\r\n\r\n\r\ndef get_categorical_fields(df, tag_field=None):\r\n \"\"\"\r\n tag_field is always categorical, so ignore it\r\n \"\"\"\r\n numerics = get_numeric_fields(df)\r\n # \"-\" is deprecated, use difference() instead\r\n categoricals = df.columns.difference(numerics)\r\n if tag_field:\r\n categoricals = categoricals.difference([tag_field])\r\n return categoricals\r\n\r\n\r\ndef flatten_categorial_fields(df, cate_fields, other_fields=None, 
dummy_na=True):\r\n \"\"\"\r\n Example:\r\n df\r\n f1 f2\r\n 0 1 T\r\n 1 2 T\r\n 2 3 S\r\n pd.get_dummies(df, 'f2', dummy_na=True)\r\n f1 f2_S f2_T f2_nan\r\n 0 1 0 1 0\r\n 1 2 0 1 0\r\n 2 3 1 0 0\r\n pd.get_dummies(df, 'f2')\r\n f1 f2_S f2_T\r\n 0 1 0 1\r\n 1 2 0 1\r\n 2 3 1 0\r\n\r\n other_fields 是可能被误认为数字类型的字段,比如年月日,也按 categorical 处理\r\n \"\"\"\r\n # “+” is deprecated, use '|' or .union() instead\r\n fields = cate_fields | other_fields if other_fields else cate_fields\r\n return pd.get_dummies(df, fields, dummy_na=dummy_na)\r\n\r\n\r\ndef split_date_fields(df, dfield, yfield='Year', mfield='Month', wfield='Weekday'):\r\n \"\"\"\r\n 把日期字段都拆分成年字段、月字段和周字段\r\n 不拆分为日字段,统计意义比较弱\r\n 要求 dfield 是 YYYY-MM-DD 的格式\r\n \"\"\"\r\n # 其实,df[dfield] 本身已经是 pd.Series 类型了,其实貌似不必加转换\r\n df[dfield] = pd.to_datetime(pd.Series(df[dfield]))\r\n df[yfield] = df[dfield].apply(lambda x: int(str(x)[:4]))\r\n df[mfield] = df[dfield].apply(lambda x: int(str(x)[5:7]))\r\n df[wfield] = df[dfield].dt.dayofweek\r\n # 删掉原来的日期字段\r\n df = df.drop(dfield, axis=1)\r\n return df\r\n\r\n\r\ndef transform_labels(df, other_fields=[]):\r\n \"\"\"\r\n 同为处理 categorical features 的方式,和 flatten_categorial_fields 略不同\r\n 后者把 feature 的可选值单拎出来作为新 feature,feature 值为是否等于该值,0/1\r\n 而本函数只是把不同的可选值按顺序修改为 0,1,2,....\r\n \"\"\"\r\n from sklearn import preprocessing\r\n for f in df.columns:\r\n if df[f].dtype == 'object' or f in other_fields:\r\n lbl = preprocessing.LabelEncoder()\r\n lbl.fit(list(df[f].values))\r\n df[f] = lbl.transform(list(df[f].values))\r\n\r\n\r\ndef train_test_split(y, test_size=0.1, random_state=1234, n_iter=1):\r\n splits = SSS(y, test_size=test_size, random_state=random_state, n_iter=n_iter)\r\n for train_index, test_index in splits:\r\n return train_index, test_index\r\n","sub_path":"src/armory/preproc.py","file_name":"preproc.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"600308013","text":"import logging\n\nfrom dateutil.parser import parse\nfrom .utils import ApiComponent\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Info(ApiComponent):\n \"\"\" A Microsoft info class\n In order to use the API following permissions are required.\n Delegated (work or school account) - Group.Read.All, Group.ReadWrite.All\n \"\"\"\n\n _endpoints = {\n 'info': '/me',\n }\n\n def __init__(self, *, parent=None, con=None, **kwargs):\n \"\"\" A Planner object\n\n :param parent: parent object\n :type parent: Account\n :param Connection con: connection to use if no parent specified\n :param Protocol protocol: protocol to use if no parent specified\n (kwargs)\n :param str main_resource: use this resource instead of parent resource\n (kwargs)\n \"\"\"\n assert parent or con, 'Need a parent or a connection'\n self.con = parent.con if parent else con\n\n super().__init__(\n protocol=parent.protocol if parent else kwargs.get('protocol'),\n main_resource=kwargs.get('main_resource', ''))\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n return 'Microsoft Planner'\n\n def get_my_info(self):\n \"\"\" Returns a my info.\"\"\"\n url = self.build_url(self._endpoints.get('info'))\n response = self.con.get(url)\n if not response:\n return None\n return self._get_my_info(response.json())\n\n async def aio_get_my_info(self):\n \"\"\" Returns my Info.\"\"\"\n url = self.build_url(self._endpoints.get('info'))\n print(url)\n response = await self.con.get(url)\n if not response:\n return None\n data = await response.json()\n return 
self._get_my_info(data)\n\n def _get_my_info(self, data):\n \"\"\"post-process data.\"\"\"\n return data\n","sub_path":"O365/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"196042120","text":"from django.shortcuts import redirect\nfrom flask import Flask, make_response, request, session, render_template, abort, jsonify, url_for, send_from_directory, \\\n flash\nfrom flask_login import LoginManager, login_user, current_user, login_required, logout_user\nfrom data import db_session\nfrom data.users import User\nfrom data.new_from import NewsForm\nimport datetime\nfrom werkzeug.utils import redirect\nfrom loginform import LoginForm, psw\nfrom registerform import RegisterForm\nfrom data.news import News\nfrom flask_restful import reqparse, abort, Api, Resource\nfrom data import news_api, user_api\nimport random\n\n\n\n\ndb_session.global_init(\"db/users.sqlite\")\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = 'yandexlyceum_secret_key'\napp.config['PERMANENT_SESSION_LIFETIME'] = datetime.timedelta(days=365)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n@app.route(\"/\")\ndef index():\n session = db_session.create_session()\n news = session.query(News)[::-1]\n\n return render_template('index.html', news=news, ln=len(news))\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n session = db_session.create_session()\n return session.query(User).get(user_id)\n\n\n@app.route('/logout')\n@login_required\ndef logout():\n logout_user()\n return redirect(\"/\")\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n session = db_session.create_session()\n user = session.query(User).filter(User.email == form.email.data).first()\n if user and user.check_password(form.password.data):\n login_user(user, remember=form.remember_me.data)\n return redirect('/')\n return render_template('login.html',\n message=\"Неправильный логин или пароль\",\n form=form)\n return render_template('login.html', title='Авторизация', form=form)\n\n@app.route('/login_2', methods=['GET', 'POST'])\ndef login_2(mail):\n a = ''\n for i in range(5):\n a += str(random.randint(0, 9))\n print(a)\n form = psw()\n if form.validate_on_submit():\n return redirect('/')\n return render_template('login_2.html', title='Авторизация', form=form)\n\n@app.route('/uploads/')\ndef uploaded_file(filename):\n return send_from_directory(app.config['UPLOAD_FOLDER'],\n filename)\n\n@app.route('/register', methods=['GET', 'POST'])\ndef reqister():\n form = RegisterForm()\n if form.validate_on_submit():\n if form.password.data != form.password_again.data:\n return render_template('register.html', title='Регистрация',\n form=form,\n message=\"Пароли не совпадают\")\n session = db_session.create_session()\n if session.query(User).filter(User.email == form.email.data).first():\n return render_template('register.html', title='Регистрация',\n form=form,\n message=\"Такой адрес почты уже занят\")\n if session.query(User).filter(User.name == form.name.data).first():\n return render_template('register.html', title='Регистрация',\n form=form,\n message=\"Такое имя уже занято\")\n user = User(\n name=form.name.data,\n email=form.email.data\n )\n user.set_password(form.password.data)\n session.add(user)\n session.commit()\n return redirect('/')\n return render_template('register.html', title='Регистрация', 
form=form)\n\n\n@app.route('/user/')\n@login_required\ndef user():\n return render_template('user.html')\n\n@app.route('/news', methods=['GET', 'POST'])\n@login_required\ndef add_news():\n form = NewsForm()\n if form.validate_on_submit():\n session = db_session.create_session()\n news = News()\n news.title = form.title.data\n news.content = form.content.data\n current_user.news.append(news)\n session.merge(current_user)\n session.commit()\n return redirect('/')\n return render_template('news.html', title='Добавление новости',\n form=form)\n\n@app.route('/news/', methods=['GET', 'POST'])\n@login_required\ndef edit_news(id):\n form = NewsForm()\n if request.method == \"GET\":\n session = db_session.create_session()\n news = session.query(News).filter(News.id == id,\n (News.user == current_user) |\n (current_user.id == 1)).first()\n if news:\n form.title.data = news.title\n form.content.data = news.content\n else:\n abort(404)\n if form.validate_on_submit():\n session = db_session.create_session()\n news = session.query(News).filter(News.id == id,\n (News.user == current_user) |\n (current_user.id == 1)).first()\n if news:\n news.title = form.title.data\n news.content = form.content.data\n session.commit()\n return redirect('/')\n else:\n abort(404)\n return render_template('news.html', title='Редактирование новости', form=form)\n\n@app.route('/news_delete/', methods=['GET', 'POST'])\n@login_required\ndef news_delete(id):\n session = db_session.create_session()\n news = session.query(News).filter(News.id == id,\n (News.user == current_user) |\n (current_user.id == 1)).first()\n if news:\n session.delete(news)\n session.commit()\n else:\n abort(404)\n return redirect('/')\n\n\n'''def main():\n app.register_blueprint(user_api.blueprint)\n app.register_blueprint(news_api.blueprint)\n app.run()'''\n\n\nif __name__ == '__main__':\n # main()\n app.run(port=\"8080\", host='127.0.0.1')","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"411963380","text":"def computepay(a, b):\r\n if a<=40 :\r\n gp = (a*b)\r\n return gp\r\n else :\r\n gp=((40*b)+((a-40)*(b*1.5)))\r\n return gp\r\n\r\nHours=input('Enter hours')\r\na=float(Hours)\r\nRate=input('Enter rate per hour')\r\nb=float(Rate)\r\n\r\ngp=computepay(a, b)\r\nprint(\"Pay\", gp)\r\n","sub_path":"Ex_6.1.py","file_name":"Ex_6.1.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"362983749","text":"import tkinter as tk\nfrom tkinter import messagebox\n\nUserName = \"\"\n\nclass MyCollectApp(tk.Toplevel):#重点\n def __init__(self):\n super().__init__() #重点\n self.title('用户信息')\n self.setupUI()\n\n def setupUI(self):\n row1 = tk.Frame(self)\n row1.pack(fill=\"x\")\n l1 = tk.Label(row1, text=\"用户名:\",height=2,width=10)\n l1.pack(side=tk.LEFT) # 这里的side可以赋值为LEFT RTGHT TOP BOTTOM\n self.xls_text = tk.StringVar()\n tk.Entry(row1, textvariable=self.xls_text).pack(side=tk.RIGHT)\n\n row2 = tk.Frame(self)\n row2.pack(fill=\"x\")\n tk.Button(row2, text=\"点击确认\", command=self.on_click).pack(side=tk.RIGHT)\n\n def on_click(self):\n #print(self.xls_text.get().lstrip())\n global UserName\n UserName = self.xls_text.get().lstrip()\n if len(UserName) == 0:\n #print(\"用户名必须输入!\")\n messagebox.showwarning(title='系统提示',message='请输入用户名!')\n return False\n self.quit()\n self.destroy()\n print(\"用户名:%s\" % (UserName))\n\nif __name__ == 
'__main__':\n    var_box = tk.messagebox.askyesno(title='System prompt', message='Verify email?')  # returns True / False\n    if var_box:\n        app = MyCollectApp()\n        app.mainloop()\n    else:\n        print('No action taken')\n\n\n\n\n\n","sub_path":"my/py_to_exe.py","file_name":"py_to_exe.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"156956957","text":"__author__ = 'brentdecracker'\n\nimport sqlite3\nimport NSDAPointer.Database_Access.__init__\n\nfrom itertools import groupby\nimport io\nfrom contextlib import redirect_stdout\n# from tkintertable import TableCanvas, TableModel\n\n\n\n\nclass RoundAnalysis(object):\n    def __init__(self, team, rd1win, rd1loss, rd2win, rd2loss, rd3win, rd3loss, rd4win, rd4loss, rd5win, rd5loss):\n        self.team = team\n        self.rd1win = rd1win\n        self.rd1loss = rd1loss\n        self.rd2win = rd2win\n        self.rd2loss = rd2loss\n        self.rd3win = rd3win\n        self.rd3loss = rd3loss\n        self.rd4win = rd4win\n        self.rd4loss = rd4loss\n        self.rd5win = rd5win\n        self.rd5loss = rd5loss\n\ndef get_rounds_analysis():\n    with sqlite3.connect(NSDAPointer.Database_Access.db_filename) as conn:\n        cursor = conn.cursor()\n        cursor.execute(\"select Team, Round, Result from Rounds\")\n        data = cursor.fetchall()\n        results = []\n\n        for key, group in groupby(data, lambda x: x[0]):\n\n            round_1_win = 0\n            round_2_win = 0\n            round_3_win = 0\n            round_4_win = 0\n            round_5_win = 0\n            round_1_loss = 0\n            round_2_loss = 0\n            round_3_loss = 0\n            round_4_loss = 0\n            round_5_loss = 0\n            # 'row' avoids shadowing the full result set held in 'data'\n            for row in group:\n                if row[1] == 1:\n                    if row[2] == \"W\":\n                        round_1_win += 1\n                    else:\n                        round_1_loss += 1\n                if row[1] == 2:\n                    if row[2] == \"W\":\n                        round_2_win += 1\n                    else:\n                        round_2_loss += 1\n                if row[1] == 3:\n                    if row[2] == \"W\":\n                        round_3_win += 1\n                    else:\n                        round_3_loss += 1\n                if row[1] == 4:\n                    if row[2] == \"W\":\n                        round_4_win += 1\n                    else:\n                        round_4_loss += 1\n                if row[1] == 5:\n                    if row[2] == \"W\":\n                        round_5_win += 1\n                    else:\n                        round_5_loss += 1\n\n            results.append(key)\n            results.append(round_1_win)\n            results.append(round_1_loss)\n            results.append(round_2_win)\n            results.append(round_2_loss)\n            results.append(round_3_win)\n            results.append(round_3_loss)\n            results.append(round_4_win)\n            results.append(round_4_loss)\n            results.append(round_5_win)\n            results.append(round_5_loss)\n        cursor.close()\n\n    return results\n\ndef rounds_analysis_report():\n    rounds_analysis = get_rounds_analysis()\n    rounds = list()\n    for team, rd1win, rd1loss, rd2win, rd2loss, rd3win, rd3loss, rd4win, rd4loss, rd5win, rd5loss in (\n            rounds_analysis[i:i + 11] for i in range(0, len(rounds_analysis), 11)):\n        rounds.append(\n            RoundAnalysis(team, rd1win, rd1loss, rd2win, rd2loss, rd3win, rd3loss, rd4win, rd4loss, rd5win, rd5loss))\n\n    # file = io.StringIO()\n    # with redirect_stdout(file):\n    for i in range(len(rounds)):\n        print(\n            \"{0}\\n Round 1\\n Win = {1}\\n Loss = {2}\\n Round 2\\n Win = {3}\\n Loss = {4}\\n Round 3\\n Win = {5}\\n Loss = {6}\\n Round 4\\n Win = {7}\\n Loss = {8}\\n Round 5\\n Win = {9}\\n Loss = {10}\\n\\n\".format(\n                str(rounds[i].team), str(rounds[i].rd1win), str(rounds[i].rd1loss), str(rounds[i].rd2win),\n                str(rounds[i].rd2loss), str(rounds[i].rd3win), str(rounds[i].rd3loss), str(rounds[i].rd4win),\n                str(rounds[i].rd4loss), str(rounds[i].rd5win), str(rounds[i].rd5loss)))\n\n    fullanalysis = {}\n    for i in range(len(rounds)):\n        # build a fresh dict for every team; reusing a single dict would make\n        # every entry of fullanalysis alias the same (last) team's numbers\n        analysisdict = {}\n        analysisdict['rd1win'] = rounds[i].rd1win\n        analysisdict['rd1loss'] = rounds[i].rd1loss\n        analysisdict['rd2win'] = rounds[i].rd2win\n        analysisdict['rd2loss'] = rounds[i].rd2loss\n        analysisdict['rd3win'] = rounds[i].rd3win\n        analysisdict['rd3loss'] = rounds[i].rd3loss\n        analysisdict['rd4win'] = rounds[i].rd4win\n        analysisdict['rd4loss'] = rounds[i].rd4loss\n        analysisdict['rd5win'] = rounds[i].rd5win\n        analysisdict['rd5loss'] = rounds[i].rd5loss\n        analysisdict['team'] = rounds[i].team\n        fullanalysis[rounds[i].team] = analysisdict\n\n\n    # # app.raoutput.delete('0', 'end-1c')\n    # app.raoutput.insert(file, 1000)\n\n    # tframe = Frame(master)\n    # tframe.pack()\n    # table = TableCanvas(tframe)\n    # table.createTableFrame()\n    # model = TableModel()\n    # table = TableCanvas(frame, model=model)\n    # table.redrawTable()\n\n","sub_path":"NSDAPointer/Reports_And_Analysis/rounds_analysis.py","file_name":"rounds_analysis.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"578331590","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:\n# runs with Python 3\n\n\nclass process:\n\n    def __init__(self):\n\n        self.db = {}\n        self.db['60'] = []\n        self.db['71'] = []\n        self.db['81'] = []\n        self.db['91'] = []\n\n    def read_data(self, line):\n        line = line.strip(\"\\r\\n\")\n        k = line.split(\" \")\n        k[1] = float(k[1])\n        s = k[1]\n        if s > 60 and s <= 70:\n            self.db['60'].append(k)\n\n        if s > 70 and s <= 80:\n            self.db['71'].append(k)\n\n        if s > 80 and s <= 90:\n            self.db['81'].append(k)\n\n        if s > 90 and s <= 100:\n            self.db['91'].append(k)\n\n    def write(self, name):\n        fp = open(name, \"w\")\n        fp.write(\"(60-70]\\n\")\n        self.db['60'] = sorted(self.db['60'], key=lambda k: k[1])\n        for i in self.db['60']:\n            fp.write(\"%25s%5.1f\\n\" % (i[0], i[1]))\n\n        fp.write(\"(70-80]\\n\")\n        self.db['71'] = sorted(self.db['71'], key=lambda k: k[1])\n        for i in self.db['71']:\n            fp.write(\"%25s%5.1f\\n\" % (i[0], i[1]))\n        fp.write(\"(80-90]\\n\")\n        self.db['81'] = sorted(self.db['81'], key=lambda k: k[1])\n        for i in self.db['81']:\n            fp.write(\"%25s%5.1f\\n\" % (i[0], i[1]))\n        fp.write(\"(90-100]\\n\")\n        self.db['91'] = sorted(self.db['91'], key=lambda k: k[1])\n        for i in self.db['91']:\n            fp.write(\"%25s%5.1f\\n\" % (i[0], i[1]))\n\n        fp.close()\n\n\ndef main():\n    '''main: Read rainfall records, bucket them by value and write a sorted report.'''\n    file_name = \"rainfall.txt\"\n    out = \"rainfallfmt.txt\"\n    p = process()\n    for i in open(file_name, \"r\"):\n        p.read_data(i)\n    # print(p.db)\n    p.write(out)\n\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"LAB1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"336527327","text":"#kategoriaA.txt\n#kategoriaB.txt\n#kategoriaC.txt\n#KATEGORIAD.txt\n\nimport sys, os\n\nscript_dir = os.path.dirname(__file__)\n\n\n\nif len(sys.argv) != 2:\n    raise ValueError(\"A name of the form AB_XXXX is required\")\nelse:\n    szukanywzorzec = sys.argv[1]\n\nprint(\"Analysis for =>\" + szukanywzorzec)\nkatA_str=open(os.path.join(script_dir, \"wejscie/kategoriaA.txt\")).read()\nkatA=katA_str.split(\",\")\n\nkatB_str=open(os.path.join(script_dir,\"wejscie/kategoriaB.txt\")).read()\nkatB = katB_str.split(\",\")\n\nkatC_str=open(os.path.join(script_dir,\"wejscie/kategoriaC.txt\")).read()\nkatC = katC_str.split(\",\")\n\nkatD_str= open(os.path.join(script_dir,\"wejscie/KATEGORIAD.txt\")).read()\nkatD =katD_str.split(\",\")\n\n\n\n\nprint(\"\\n**** RESULT ****\")\n# the branches must form a single if/elif chain; with a second plain 'if',\n# a hit in katA would fall through to the final else and print \"not found\"\nif (szukanywzorzec in katA):\n    print(szukanywzorzec + \"=>\" + \"KAT_A\")\nelif (szukanywzorzec in katB):\n    print(szukanywzorzec + \"=>\" + \"KAT_B\")\nelif (szukanywzorzec in katC):\n    print(szukanywzorzec + \"=>\" + \"KAT_C\")\nelif (szukanywzorzec in katD):\n    print(szukanywzorzec + \"=>\" + \"KAT_D\")\nelse:\n    print(\"Name not found\")\nprint(\"***************\")\n","sub_path":"laboratorium/klasyfikacja_do_zbioru/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"407065693","text":"#Author: sw180283\n# import all these things so that the .py file can access them\n\nimport random # import random allows for random generation of values\nimport json\n\nfrom kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.core.window import Window\nfrom kivy.uix.image import Image\nfrom kivy.clock import Clock\nfrom kivy.uix.label import Label\nfrom kivy.core.audio import SoundLoader\nfrom kivy.uix.button import Button\nfrom kivy.network.urlrequest import UrlRequest\n\n'''\nSet the location and names for each of the sounds.\n'''\nsfx_flap = SoundLoader.load(\"Resources/audio/flap.wav\")\nsfx_score = SoundLoader.load(\"Resources/audio/score.wav\")\nsfx_die = SoundLoader.load(\"Resources/audio/Owl_music.wav\")\n\n'''\nCreate the main menu and add the background, ground and label.\nSuper makes sure the widget is initialised.\n'''\nclass Menu(Widget):\n    def __init__(self):\n        super(Menu, self).__init__()\n        self.add_widget(Sprite(source=\"Resources/images/metal_background.png\"))\n        self.size = self.children[0].size\n        self.add_widget(Ground(source=\"Resources/images/metal_ground.png\"))\n        self.add_widget(Label(center=self.center, text=\"METAL\" + \"\\n\" + \" GEAR\" + \"\\n\" + \" OWL\"))\n        self.add_widget(Label(pos=(self.center_x-44, self.center_y-140), text=\"Tap to start\"))\n\n    '''\n    When the user presses the touch screen or clicks the mouse, remove the\n    main menu widget and add the game widget.\n    '''\n    def on_touch_down(self, *ignore):\n        parent = self.parent\n        parent.remove_widget(self)\n        parent.add_widget(Game())\n\n'''\nUses the database to get high scores.\n'''\nclass Scores(Widget):\n    def __init__(self):\n        super(Scores, self).__init__()\n        self.add_widget(Sprite(source=\"Resources/images/metal_background.png\"))\n        self.size = self.children[0].size\n\n        self.add_widget(Sprite(source=\"Resources/images/metal_background.png\"))\n        self.add_widget(Ground(source=\"Resources/images/metal_ground.png\"))\n\n        '''\n        Top score button display.\n        '''\n        self.btn_top_score = Button(pos=(self.center_x-120, self.center_y+90), text=\"Get Highscore\", font_size=4, size=(240,50))\n        self.btn_top_score.bind(on_press=self.callback_top_score)\n        self.add_widget(self.btn_top_score)\n\n        '''\n        Top scores button display.\n        '''\n        self.btn_top_scores = Button(pos=(self.center_x-120, self.center_y+40), text=\"Get Top 10\", font_size=4, size=(240,50))\n        self.btn_top_scores.bind(on_press=self.callback_top_scores)\n        self.add_widget(self.btn_top_scores)\n\n        #first_name_top_score\n        self.label_top_scores_first=Label(pos=(self.center_x-140, self.center_y-90), text=\"\", font_size='14sp')\n        self.add_widget(self.label_top_scores_first)\n\n        #last_name_top_score\n        self.label_top_scores_last=Label(pos=(self.center_x-40, self.center_y-90), text=\"\", font_size='14sp')\n        self.add_widget(self.label_top_scores_last)\n\n        #score_name_top_score\n        self.label_top_scores_score=Label(pos=(self.center_x+40, self.center_y-90), text=\"\", 
font_size='14sp')\n self.add_widget(self.label_top_scores_score)\n\n '''\n Menu screen button display.\n '''\n self.btn_menu = Button(pos=(self.center_x-120, self.center_y-190), text=\"Main Menu\", font_size=4, size=(240,50))\n self.btn_menu.bind(on_press=self.change)\n self.add_widget(self.btn_menu)\n\n '''\n Change the list of buttons displayed.\n '''\n def change(self, event):\n parent = self.parent\n parent.remove_widget(self)\n parent.add_widget(Menu())\n\n '''\n For top score get the results from the database.\n '''\n def got_database_top_score(self, request, results):\n self.btn_top_score.text=results\n\n def callback_top_score(self, event):\n request = UrlRequest('http://bsccg03.ga.fal.io/?request=top_score', self.got_database_top_score)\n\n '''\n For top 10 scores get the results from the database.\n '''\n def got_database_top_scores(self, request, results):\n print(results)\n\n parse_json = json.loads(results)\n print(parse_json)\n\n #first_name\n first=\"\\n\".join(str(result[0]) for result in parse_json)\n self.label_top_scores_first.text=first\n\n #last_name\n last=\"\\n\".join(str(result[1]) for result in parse_json)\n self.label_top_scores_last.text=last\n\n #score\n score=\"\\n\".join(str(result[2]) for result in parse_json)\n self.label_top_scores_score.text=score\n\n def callback_top_scores(self, event):\n request = UrlRequest('http://bsccg03.ga.fal.io/?request=top_scores', self.got_database_top_scores)\n\n'''\nSet the class sprite as an image and name the size of it the size of the texture of the image.\n'''\nclass Sprite(Image):\n def __init__(self, **kwargs):\n super(Sprite, self).__init__(**kwargs)\n self.size = self.texture_size\n\n'''\nThis widget controls both the top and the bottom spikes as one set.\nThe top of the image is 5.5 x the size of the player.\n'''\nclass Pipe(Widget):\n def __init__(self, pos):\n super(Pipe, self).__init__(pos=pos)\n distance_y= 90\n increase_distance=40\n self.top_image = Sprite(source=\"Resources/images/spike_top.png\")\n self.top_image.pos = (self.x, self.y + distance_y)\n self.add_widget(self.top_image)\n self.bottom_image = Sprite(source=\"Resources/images/spike_bottom.png\")\n self.bottom_image.pos = (self.x, self.y - self.top_image.height - increase_distance)\n self.add_widget(self.bottom_image)\n self.width = self.bottom_image.width\n\n '''\n Update the spikes and moves them 2 pixels to the left.\n Set the top spike and bottom spike as the new x position.\n If the spike x position is less than 0 remove the widget.\n '''\n def update(self):\n self.x -= 2\n self.top_image.x = self.bottom_image.x = self.x\n if self.right < 0:\n self.parent.remove_widget(self)\n\n'''\nControls all spikes.\nAdd spike is set to 0 which is descreases over time.\nWhen less than 0 set a random height and x position.\nPlace the new spike widget and randomise the add spike variable countdown.\n'''\nclass Pipes(Widget):\n add_pipe = 0\n def update(self, dt):\n for child in list(self.children):\n child.update()\n self.add_pipe -= dt\n if self.add_pipe < 0:\n y = random.randint(self.y + 50, self.height - 144)\n x = random.randint(self.width, self.width + 40)\n self.add_widget(Pipe(pos=(x, y)))\n self.add_pipe = random.uniform(0.5,4.0)\n\n'''\nSet the background image as a sprite and set it's size to the size of the image.\nCreate a duplicate of the background image and set the x position to the width of the background.\n'''\nclass Background(Widget):\n def __init__(self, source):\n super(Background, self).__init__()\n self.image = Sprite(source=source)\n 
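# keep a reference to the base sprite: update() scrolls it together with\n        # the duplicate created below, so the scrolling background wraps around\n        # without a visible seam\n        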
self.add_widget(self.image)\n self.size = self.image.size\n self.image_dupe = Sprite(source=source, x=self.width)\n self.add_widget(self.image_dupe)\n self.scored = False\n\n '''\n Update the image and the duplicate by moving 2 pixels to the left.\n When the image is less or equal to 0 set the image to the 0 x position\n And the image duplicate x position to the width of the image so appears offscreen.\n '''\n def update(self):\n self.image.x -= 2\n self.image_dupe.x -= 2\n\n if self.image.right <= 0:\n self.image.x = 0\n self.image_dupe.x = self.width\n\n'''\nThe bird is called as a sprite and the velocity is set to 0 which is the movement in the y axis.\nThe gravity is set to -0.3 as it is a suitable fall speed\n'''\nclass Bird(Sprite):\n def __init__(self, pos):\n super(Bird, self).__init__(source=\"atlas://Resources/images/owl_anim/wing-up\", pos=pos)\n self.velocity_y = 0\n self.gravity = -.3\n\n '''\n The velocity and gravity are added to make the combined direction of the y axis.\n The altas changes the image of the bird depending on the velocity of the bird.\n '''\n def update(self):\n self.velocity_y += self.gravity\n self.velocity_y = max(self.velocity_y, -10)\n self.y += self.velocity_y\n if self.velocity_y < -4:\n self.source = \"atlas://Resources/images/owl_anim/wing-up\"\n elif self.velocity_y < 0:\n self.source = \"atlas://Resources/images/owl_anim/wing-mid\"\n\n '''\n When the screen is tapped or mouse is clicked increase the upward force, change the image of the bird and make sound.\n The velocity increase is 5.5 as it is a suitable y axis movement.\n '''\n def on_touch_down(self, *ignore):\n self.velocity_y = 5.5\n self.source = \"atlas://Resources/images/owl_anim/wing-down\"\n sfx_flap.play()\n\n'''\nThe ground is called as a sprite and updated to move 2 pixels left.\nWhen the x position is less than -24 then set the reset the position back.\nThis is a number that is suitable to the length of the ground sprite.\n'''\nclass Ground(Sprite):\n def update(self):\n self.x -= 2\n\n if self.x < -24:\n self.x += 24\n\n'''\nAll the widgets needed for the main game are added to the game widget.\nSet the clock to update.\nSet the label opacity for game over to be 0.\nSet the bird and spike position.\nSet game over to default false and score to 0.\n'''\nclass Game(Widget):\n def __init__(self):\n super(Game, self).__init__()\n self.background = Background(source=\"Resources/images/metal_background.png\")\n self.size = self.background.size\n self.add_widget(self.background)\n self.ground = Ground(source=\"Resources/images/metal_ground.png\")\n self.pipes = Pipes(pos=(0, self.ground.height), size=self.size)\n self.add_widget(self.pipes)\n self.add_widget(self.ground)\n self.score_label = Label(center_x=self.center_x,\n top=self.top - 30, text=\"0\")\n self.add_widget(self.score_label)\n self.over_label = Label(center=self.center, opacity=0,\n text=\"Game Over\")\n self.add_widget(self.over_label)\n self.bird = Bird(pos=(20, 40))\n self.add_widget(self.bird)\n Clock.schedule_interval(self.update, 1.0/60.0)\n self.game_over = False\n self.score = 0\n\n '''\n Update te game and each of the widgets.\n '''\n def update(self, dt):\n if self.game_over:\n return\n\n self.background.update()\n self.bird.update()\n self.ground.update()\n self.pipes.update(dt)\n\n '''\n If the bird is less than 40 in the y axis, which is the level of the ground, then set the y to 40.\n If the bird goes above the top of the screen -40 then set the top of the player to be -44 from the top.\n This stops the player 
from going off the screen.\n        '''\n        if (self.bird.y < 40):\n            self.bird.y = 40\n        if (self.bird.top > self.height-40):\n            self.bird.top = self.height-44\n\n        '''\n        When the background x position reaches 0 then increase the score by 1.\n        '''\n        if self.background.image.x == 0:\n            self.background.scored = True\n            self.score += 1\n            self.score_label.text = str(self.score) + \" m\"\n            sfx_score.play()\n\n        '''\n        If the player collides with either the top or the bottom spike then\n        the game is over.\n        '''\n        for pipe in self.pipes.children:\n            if pipe.top_image.collide_widget(self.bird):\n                self.game_over = True\n            if pipe.bottom_image.collide_widget(self.bird):\n                self.game_over = True\n\n        '''\n        If game over is true then change opacity of game over label to 1 and play music.\n        Set the next event to tap or click.\n        '''\n        if self.game_over:\n            sfx_die.play()\n            self.over_label.opacity = 1\n            self.bind(on_touch_down=self._on_touch_down)\n\n    '''\n    To remove the game widget and move on to the score screen, click or tap.\n    '''\n    def _on_touch_down(self, *ignore):\n        parent = self.parent\n        parent.remove_widget(self)\n        parent.add_widget(Scores())\n\n'''\nThis is the game app.\nSet the window size.\n'''\nclass Metal_OwlApp(App):\n    def build(self):\n        top = Widget()\n        top.add_widget(Menu())\n        Window.size = top.children[0].size\n        return top\n\nif __name__ == \"__main__\":\n    Metal_OwlApp().run()\n","sub_path":"metal_owl/metal_owl.py","file_name":"metal_owl.py","file_ext":"py","file_size_in_byte":12585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"555952580","text":"\"\"\"\n9. Among the natural numbers that were entered, find the one with the\nlargest digit sum. Print that number and the sum of its digits.\n\"\"\"\n\n\ndef sum_of_digits(number):\n    if len(str(number)) == 1:\n        return number\n    return number % 10 + sum_of_digits(number // 10)\n\n\nn = 3\nresult = 0\ndesired_number = 0  # guards against inputs whose digit sums are all 0\n\nfor i in range(n):\n    number = int(input(f'Enter number {i + 1}: '))\n    sum_number = sum_of_digits(number)\n    if result < sum_number:\n        desired_number = number\n        result = sum_number\n\nprint(desired_number, result)\n","sub_path":"Lesson_2/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"257741568","text":"# fibonacci.py\n\ndef fib():\n    '''\n    Implement the Fibonacci sequence to calculate the\n    first 10 Fibonacci numbers, note Fn = Fn-1 + Fn-2\n    '''\n    fibs = [1, 2]\n    for i in range(1, 9):\n        fib_n = fibs[i] + fibs[i-1]\n        fibs.append(fib_n)\n\n    return fibs\n\ndef main():\n    print('OUTPUT', fib())\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"fib.py","file_name":"fib.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"559036816","text":"\"\"\"Support for MySensors sensors.\"\"\"\nfrom __future__ import annotations\n\nfrom awesomeversion import AwesomeVersion\n\nfrom homeassistant.components import mysensors\nfrom homeassistant.components.sensor import DOMAIN, SensorEntity\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import (\n    CONDUCTIVITY,\n    DEGREE,\n    DEVICE_CLASS_HUMIDITY,\n    DEVICE_CLASS_TEMPERATURE,\n    ELECTRICAL_CURRENT_AMPERE,\n    
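# the constants above and below populate the (unit, icon, device_class)\n    # entries of the SENSORS map further down\n    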
ELECTRICAL_VOLT_AMPERE,\n ENERGY_KILO_WATT_HOUR,\n FREQUENCY_HERTZ,\n LENGTH_METERS,\n LIGHT_LUX,\n MASS_KILOGRAMS,\n PERCENTAGE,\n POWER_WATT,\n TEMP_CELSIUS,\n TEMP_FAHRENHEIT,\n VOLT,\n VOLUME_CUBIC_METERS,\n)\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.dispatcher import async_dispatcher_connect\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\n\nfrom .const import MYSENSORS_DISCOVERY, DiscoveryInfo\nfrom .helpers import on_unload\n\nSENSORS: dict[str, list[str | None] | dict[str, list[str | None]]] = {\n \"V_TEMP\": [None, None, DEVICE_CLASS_TEMPERATURE],\n \"V_HUM\": [PERCENTAGE, \"mdi:water-percent\", DEVICE_CLASS_HUMIDITY],\n \"V_DIMMER\": [PERCENTAGE, \"mdi:percent\", None],\n \"V_PERCENTAGE\": [PERCENTAGE, \"mdi:percent\", None],\n \"V_PRESSURE\": [None, \"mdi:gauge\", None],\n \"V_FORECAST\": [None, \"mdi:weather-partly-cloudy\", None],\n \"V_RAIN\": [None, \"mdi:weather-rainy\", None],\n \"V_RAINRATE\": [None, \"mdi:weather-rainy\", None],\n \"V_WIND\": [None, \"mdi:weather-windy\", None],\n \"V_GUST\": [None, \"mdi:weather-windy\", None],\n \"V_DIRECTION\": [DEGREE, \"mdi:compass\", None],\n \"V_WEIGHT\": [MASS_KILOGRAMS, \"mdi:weight-kilogram\", None],\n \"V_DISTANCE\": [LENGTH_METERS, \"mdi:ruler\", None],\n \"V_IMPEDANCE\": [\"ohm\", None, None],\n \"V_WATT\": [POWER_WATT, None, None],\n \"V_KWH\": [ENERGY_KILO_WATT_HOUR, None, None],\n \"V_LIGHT_LEVEL\": [PERCENTAGE, \"mdi:white-balance-sunny\", None],\n \"V_FLOW\": [LENGTH_METERS, \"mdi:gauge\", None],\n \"V_VOLUME\": [f\"{VOLUME_CUBIC_METERS}\", None, None],\n \"V_LEVEL\": {\n \"S_SOUND\": [\"dB\", \"mdi:volume-high\", None],\n \"S_VIBRATION\": [FREQUENCY_HERTZ, None, None],\n \"S_LIGHT_LEVEL\": [LIGHT_LUX, \"mdi:white-balance-sunny\", None],\n },\n \"V_VOLTAGE\": [VOLT, \"mdi:flash\", None],\n \"V_CURRENT\": [ELECTRICAL_CURRENT_AMPERE, \"mdi:flash-auto\", None],\n \"V_PH\": [\"pH\", None, None],\n \"V_ORP\": [\"mV\", None, None],\n \"V_EC\": [CONDUCTIVITY, None, None],\n \"V_VAR\": [\"var\", None, None],\n \"V_VA\": [ELECTRICAL_VOLT_AMPERE, None, None],\n}\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n config_entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n \"\"\"Set up this platform for a specific ConfigEntry(==Gateway).\"\"\"\n\n async def async_discover(discovery_info: DiscoveryInfo) -> None:\n \"\"\"Discover and add a MySensors sensor.\"\"\"\n mysensors.setup_mysensors_platform(\n hass,\n DOMAIN,\n discovery_info,\n MySensorsSensor,\n async_add_entities=async_add_entities,\n )\n\n on_unload(\n hass,\n config_entry.entry_id,\n async_dispatcher_connect(\n hass,\n MYSENSORS_DISCOVERY.format(config_entry.entry_id, DOMAIN),\n async_discover,\n ),\n )\n\n\nclass MySensorsSensor(mysensors.device.MySensorsEntity, SensorEntity):\n \"\"\"Representation of a MySensors Sensor child node.\"\"\"\n\n @property\n def force_update(self) -> bool:\n \"\"\"Return True if state updates should be forced.\n\n If True, a state change will be triggered anytime the state property is\n updated, not just when the value changes.\n \"\"\"\n return True\n\n @property\n def state(self) -> str | None:\n \"\"\"Return the state of this entity.\"\"\"\n return self._values.get(self.value_type)\n\n @property\n def device_class(self) -> str | None:\n \"\"\"Return the device class of this entity.\"\"\"\n return self._get_sensor_type()[2]\n\n @property\n def icon(self) -> str | None:\n \"\"\"Return the icon to use in the frontend, if any.\"\"\"\n return 
self._get_sensor_type()[1]\n\n    @property\n    def unit_of_measurement(self) -> str | None:\n        \"\"\"Return the unit of measurement of this entity.\"\"\"\n        set_req = self.gateway.const.SetReq\n        if (\n            AwesomeVersion(self.gateway.protocol_version) >= AwesomeVersion(\"1.5\")\n            and set_req.V_UNIT_PREFIX in self._values\n        ):\n            custom_unit: str = self._values[set_req.V_UNIT_PREFIX]\n            return custom_unit\n\n        if set_req(self.value_type) == set_req.V_TEMP:\n            if self.hass.config.units.is_metric:\n                return TEMP_CELSIUS\n            return TEMP_FAHRENHEIT\n\n        unit = self._get_sensor_type()[0]\n        return unit\n\n    def _get_sensor_type(self) -> list[str | None]:\n        \"\"\"Return list with unit and icon of sensor type.\"\"\"\n        pres = self.gateway.const.Presentation\n        set_req = self.gateway.const.SetReq\n\n        _sensor_type = SENSORS.get(set_req(self.value_type).name, [None, None, None])\n        if isinstance(_sensor_type, dict):\n            sensor_type = _sensor_type.get(\n                pres(self.child_type).name, [None, None, None]\n            )\n        else:\n            sensor_type = _sensor_type\n        return sensor_type\n","sub_path":"homeassistant/components/mysensors/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"290288760","text":"import os\nimport sys\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(dest='testcase', help='Set a test case')\nargs = parser.parse_args()\ncase = args.testcase\n\nif case == \"test\":\n\tos.system('interpreter/build/interpreter test/testbyte')\n\nelif case == \"login\":\n\tos.system('interpreter/build/interpreter login/loginbyte')\n\nelif case == \"backdoor\":\n\tos.system('backdoor/build/interpreter login/loginbyte')\n\nelse:\n\tprint(\"Wrong case. Type 'test', 'login' or 'backdoor'\")","sub_path":"execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"634965836","text":"import shutil\n\nimport os\n\n\ndef limit_files_per_folder(folder, max_number_of_files_per_folder):\n    for root, directories, files in os.walk(folder, topdown=False):\n        for directory in directories:\n            dir_path = os.path.join(root, directory)\n            files_in_folder = len(os.listdir(dir_path))\n            if files_in_folder > max_number_of_files_per_folder:\n                number_of_subfolders = ((files_in_folder - 1) // max_number_of_files_per_folder) + 1\n                for subFolderNumber in range(1, number_of_subfolders + 1):\n                    sub_folder_path = os.path.join(dir_path, str(subFolderNumber))\n                    if not os.path.exists(sub_folder_path):\n                        os.mkdir(sub_folder_path)\n                file_counter = 1\n                for file_name in os.listdir(dir_path):\n                    source = os.path.join(dir_path, file_name)\n                    if os.path.isfile(source):\n                        dest_dir = str(((file_counter - 1) // max_number_of_files_per_folder) + 1)\n                        destination = os.path.join(dir_path, dest_dir, file_name)\n                        shutil.move(source, destination)\n                        file_counter += 1\n","sub_path":"numberOfFilesPerFolderLimiter.py","file_name":"numberOfFilesPerFolderLimiter.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"532864036","text":"from tkinter import *\nfrom tkinter import messagebox\nimport requests\n\n# create the window\nroot = Tk()\n# window title\nroot.title('Chinese-English Translator')\n# window size\nroot.geometry('370x100')\n# window position\n# root.geometry('+600+450')\ns_with = root.winfo_screenwidth()  # get the screen width\ns_height = root.winfo_screenheight()  # get the screen height\n# compute the position that centers the window on the screen\nl_x = str(round((s_with - 370) / 2))\nl_y = str(round((s_height - 100) / 2))\nroot.geometry('+' + l_x + '+' + l_y)\n# first-column label\nlabel = Label(root, text='Enter text:')\n# place it; the layout managers are grid (grid layout), pack and place\nlabel.grid()\n# input widget\nentry = Entry(root, font=('微软雅黑', 15))\nentry.grid(row=0, column=1)\nres = StringVar()\n# translation result label\nlabel1 = Label(root, text='Result:')\nlabel1.grid(row=1, column=0)\n# translation result entry\nentry1 = Entry(root, font=('微软雅黑', 15), textvariable=res)\nentry1.grid(row=1, column=1)\n\n\n# translation handler\ndef translate():\n    content = entry.get()\n    content = content.strip()\n    if content == '':\n        messagebox.showinfo('Notice', 'Please enter text to translate')\n    else:\n        url = \"http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule\"\n        data = {}\n        data['i'] = content\n        #data['from'] = 'AUTO'\n        #data['to'] = 'AUTO'\n        #data['smartresult'] = 'dict'\n        #data['client'] = 'fanyideskweb'\n        #data['salt'] = '1538295833420'\n        #data['sign'] = '07'\n        data['doctype'] = 'json'\n        #data['version'] = '2.1'\n        #data['keyfrom'] = 'fanyi.web'\n        #data['action'] = 'FY_BY_REALTIME'\n        #data['typoResult'] = 'false'\n        headers = {\n            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n        }\n        result = requests.post(url, data, headers=headers)\n        trans = result.json()\n        print(trans)\n        tran = trans['translateResult'][0][0]['tgt']\n        res.set(tran)\n\n\n# translate button\nbutton = Button(root, text='Translate', width='10', command=translate)\n# sticky alignment: N S W E = top, bottom, left, right\nbutton.grid(row=2, column=0, sticky=W)\n\n# quit button; command is the click handler\nexit_button = Button(root, text='Quit', width='10', command=root.quit)\nexit_button.grid(row=2, column=1, sticky=E)\n\n# show the window; the main loop receives every event sent to the window\nroot.mainloop()\n","sub_path":"翻译.py","file_name":"翻译.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"450484171","text":"# coding: utf-8\n\nimport os.path as path\nimport pandas as pd\n\n\nclass ConversationReader:\n    \"\"\"\n    Parser for the corpus data.\n\n    Reads lazily. Expects Tracy's corpus format.\n\n    Parameters\n    ----------\n    file_path: str\n        Path to the corpus file.\n\n    delimiter: str, optional (default=';').\n        String separating sentence and tag.\n\n    Attributes\n    ----------\n    input_file_path : str\n        Path to the corpus file.\n\n    delimiter : str\n        Symbol to use to separate values in records\n    \"\"\"\n\n    def __init__(self, file_path, delimiter=';'):\n\n        self.input_file_path = file_path\n        self.name = path.basename(file_path)\n\n        self.delimiter = delimiter\n\n    def __repr__(self):\n        return \"<ConversationReader: %s>\" % self.name\n\n    def _read(self):\n        \"\"\"\n        Parse the corpus file into sentences and tags.\n        \"\"\"\n        try:\n            data = pd.read_csv(self.input_file_path, delimiter=self.delimiter)\n        except FileNotFoundError:\n            raise FileNotFoundError(\n                \"File '%s' not found. You should download it manually or use\"\n                \" `data/download.py`. 
See `README.md` for more information.\" %\n self.input_file_path)\n\n agg = (data[['message_id', 'chat_id', 'data', 'category']]\n .groupby('chat_id')['data', 'category']\n .agg({'data': lambda x: \"\\n\".join(x), 'category': max})\n .reset_index())\n\n labels = list(agg['category'].values)\n sentences = list(agg['data'].values)\n\n self._labels = labels\n self._sentences = sentences\n self._data = agg\n\n def get_sentences(self):\n if not hasattr(self, '_data'):\n self._read()\n return self._sentences\n\n def get_labels(self):\n if not hasattr(self, '_data'):\n self._read()\n return self._labels\n\n @property\n def sentences(self):\n return self.get_sentences()\n\n @property\n def labels(self):\n return self.get_labels()\n\n\ndef load_dataset(file_path, delimiter=';'):\n reader = ConversationReader(file_path, delimiter)\n sentences = reader.get_sentences()\n labels = reader.get_labels()\n\n return sentences, labels\n","sub_path":"data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"382523306","text":"#!/usr/bin/env python3.4\n#coding:utf-8\n\n'''\nLeetcode一步一个脚印。\n\n345.Reverse Vowels of a String\n\n思路:\n 创建新列表保存字符串中元音字母位置,若列表长度大于1,\n 前后位置元音字母交换。\n\nNOTE:\n 忽略大小写,导致第一次提交错误。\n'''\n\ndef reverseVowels(s):\n vowels = ['a', 'e', 'i', 'o', 'u']\n lst = list(s)\n str_len = len(s)\n v = [lst.index(i) for i in lst if i.lower() in vowels]\n #for i in range(str_len):\n # 字母大小写不能忽略!!!\n # if s[i].lower() in vowels:\n # v.append(i)\n l = len(v)\n if l > 1:\n for index in range(l//2):\n lst[v[index]],lst[v[l-index-1]] = lst[v[l-index-1]], lst[v[index]]\n result = ''.join(lst)\n return result\n\n\ndef run():\n string = input()\n result = reverseVowels(string)\n print(result)\n\nif __name__ == '__main__':\n run()\n","sub_path":"2016-04-26/reverse_vowels_of_a_String.py","file_name":"reverse_vowels_of_a_String.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"376109755","text":"# -*- coding: utf-8 -*-\n\"\"\"\n义工是否和老人有互动主程序\n\"\"\"\n\nfrom scipy.spatial import distance as dist\nfrom PIL import Image, ImageDraw, ImageFont\nimport cv2\nimport time\nimport os\nimport imutils\nimport numpy as np\nimport subprocess\n\n\ndef checkingvolunteeractivity(frame, output_activity_path,\n FACE_ACTUAL_WIDTH, ACTUAL_DISTANCE_LIMIT,\n id_card_to_name, id_card_to_type,\n faceutil, python_path, roomID,\n activity_time_controller):\n camera_turned = 0\n frame = imutils.resize(frame,\n width=640,\n height=480) # 压缩,为了加快识别速度\n\n face_location_list, names = faceutil.get_face_location_and_name(frame)\n\n # 得到画面的四分之一位置和四分之三位置,并垂直划线\n one_sixth_image_center = (int(640 / 6), int(480 / 6))\n five_sixth_image_center = (int(640 / 6 * 5),\n int(480 / 6 * 5))\n\n cv2.line(frame, (one_sixth_image_center[0], 0),\n (one_sixth_image_center[0], 480),\n (0, 255, 255), 1)\n cv2.line(frame, (five_sixth_image_center[0], 0),\n (five_sixth_image_center[0], 480),\n (0, 255, 255), 1)\n\n people_type_list = list(set([id_card_to_type[i] for i in names]))\n\n volunteer_name_direction_dict = {}\n volunteer_centroids = []\n volunteer_name = \"\"\n old_people_centroids = []\n old_people_name = []\n\n # loop over the face bounding boxes\n for ((left, top, right, bottom), name) in zip(face_location_list, names): # 处理单个人\n\n person_type = id_card_to_type[name]\n # 将人脸框出来\n rectangle_color = (0, 0, 255)\n if person_type == 
'old_people':\n rectangle_color = (0, 0, 128)\n elif person_type == 'employee':\n rectangle_color = (255, 0, 0)\n elif person_type == 'volunteer':\n rectangle_color = (0, 255, 0)\n else:\n pass\n cv2.rectangle(frame, (left, top), (right, bottom),\n rectangle_color, 2)\n\n if 'volunteer' not in people_type_list: # 如果没有义工,直接跳出本次循环\n continue\n\n if person_type == 'volunteer': # 如果检测到有义工存在\n # 获得义工位置\n volunteer_face_center = (int((right + left) / 2),\n int((top + bottom) / 2))\n volunteer_centroids.append(volunteer_face_center)\n volunteer_name = name\n\n cv2.circle(frame,\n (volunteer_face_center[0], volunteer_face_center[1]),\n 8, (255, 0, 0), -1)\n\n adjust_direction = ''\n # face locates too left, servo need to turn right,\n # so that face turn right as well\n if volunteer_face_center[0] < one_sixth_image_center[0]:\n adjust_direction = 'right'\n elif volunteer_face_center[0] > five_sixth_image_center[0]:\n adjust_direction = 'left'\n\n volunteer_name_direction_dict[name] = adjust_direction\n\n elif person_type == 'old_people': # 如果没有发现义工\n old_people_face_center = (int((right + left) / 2),\n int((top + bottom) / 2))\n old_people_centroids.append(old_people_face_center)\n old_people_name.append(name)\n\n cv2.circle(frame,\n (old_people_face_center[0], old_people_face_center[1]),\n 4, (0, 255, 0), -1)\n else:\n pass\n\n # 人脸识别和表情识别都结束后,把表情和人名写上 (同时处理中文显示问题)\n img_PIL = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n draw = ImageDraw.Draw(img_PIL)\n final_label = id_card_to_name[name]\n draw.text((left, top - 30), final_label,\n font=ImageFont.truetype('C:\\\\Windows\\\\Fonts\\\\SIMLI.TTF', 40),\n fill=(255, 0, 0)) # linux\n # 转换回OpenCV格式\n frame = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)\n\n # 义工追踪逻辑\n if 'volunteer' in people_type_list:\n volunteer_adjust_direction_list = list(volunteer_name_direction_dict.values())\n if '' in volunteer_adjust_direction_list: # 有的义工恰好在范围内,所以不需要调整舵机\n x=1\n # print('有义工恰好在可见范围内,摄像头不需要转动')\n else:\n adjust_direction = volunteer_adjust_direction_list[0]\n camera_turned = 1\n # print('摄像头需要 turn %s %d 度' % (adjust_direction, 20))\n\n # 在义工和老人之间划线\n is_activity = False\n\n if camera_turned == 0:\n for i in volunteer_centroids:\n for j_index, j in enumerate(old_people_centroids):\n pixel_distance = dist.euclidean(i, j)\n face_pixel_width = sum([i[2] - i[0] for i in face_location_list]) / len(face_location_list)\n pixel_per_metric = face_pixel_width / FACE_ACTUAL_WIDTH\n actual_distance = pixel_distance / pixel_per_metric\n\n if actual_distance < ACTUAL_DISTANCE_LIMIT:\n cv2.line(frame, (int(i[0]), int(i[1])),\n (int(j[0]), int(j[1])), (255, 0, 255), 2)\n\n label = 'distance: %dcm' % actual_distance\n cv2.putText(frame, label, (frame.shape[1] - 150, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n (0, 0, 255), 2)\n is_activity = True\n\n if is_activity:\n if activity_time_controller.activity_timing == 0:\n activity_time_controller.set_activity_timing(1)\n activity_time_controller.start_activity_start_time()\n\n else:\n activity_end_time = time.time()\n difference = activity_end_time - activity_time_controller.activity_start_time\n\n if difference > activity_time_controller.activity_limit_time:\n activity_image_path = os.path.join(output_activity_path,\n 'snapshot_%s.jpg' % (time.strftime('%Y%m%d_%H%M%S')))\n cv2.imwrite(activity_image_path, frame) # snapshot\n activity_time_controller.set_activity_timing(0)\n\n # 传输抓拍的义工交互的照片\n # insert into database\n old_volunteer_types = \"\"\n old_volunteer_names = \"\"\n for i in range(0, 
len(old_people_name)):\n old_volunteer_names += id_card_to_name[old_people_name[i]]\n old_volunteer_types += \"0\"\n old_volunteer_names += \"_\"\n old_volunteer_types += \"_\"\n\n old_volunteer_types += \"2\"\n old_volunteer_names += id_card_to_name[volunteer_name]\n\n print(old_volunteer_types)\n print(old_volunteer_names)\n\n command = '%s inserting.py --image_path %s --eventName %s --room %s --renyuanType ' \\\n '%s --renyuan %s' % (\n python_path, activity_image_path, \"3\", roomID, old_volunteer_types, old_volunteer_names)\n p = subprocess.Popen(command, shell=True)\n return frame\n","sub_path":"A_Final_Sys/oldcare/CV_part/volunterActivity.py","file_name":"volunterActivity.py","file_ext":"py","file_size_in_byte":7485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"415232911","text":"class Node: \n def __init__(self, data) -> None:\n self.data = data\n self.next = None\n\ndef deleteLastElement(head: Node) -> Node:\n if head == None or head.next == None:\n return None\n else:\n tmp = head\n ptr, nextPtr = tmp, tmp.next\n while(nextPtr.next != None):\n ptr = nextPtr\n nextPtr = nextPtr.next\n\n ptr.next = None\n return tmp\n\ndef printLL(head):\n tmp = head\n while(tmp != None):\n print(tmp.data, end= ' -> ')\n tmp = tmp.next\n print(\"*\")\n\ndef main():\n f = Node(6)\n e = Node(5)\n e.next = f\n d = Node(4)\n d.next = e\n c = Node(3)\n c.next = d\n b = Node(2)\n b.next = c\n a = Node(1)\n a.next = b\n header = a\n\n printLL(header)\n print(\"After deletion of last node: \")\n printLL(deleteLastElement(header))\n\nmain()","sub_path":"DeleteLastElement.py","file_name":"DeleteLastElement.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"402826307","text":"class Student:\n \"\"\"The student class models a student with a name, ID number,\n a graduation year, and a concentration.\n \"\"\"\n def __init__(self, name, idNumber, concentration, graduationYear=2020):\n self._name = name\n self._idNumber = idNumber\n self._graduationYear = graduationYear\n self._concentration = concentration\n\n def set_concentration(self, concentration):\n self._concentration = concentration\n\n # Other accessors/mutators...\n def print_student_info(self):\n print(\"Student named \" + self._name + \" has ID number \" + \\\n str(self._idNumber) + \", is graduating in \" + \\\n str(self._graduationYear) + \" and is studying \" + \\\n self._concentration + \".\")\n\nif __name__ == \"__main__\" :\n dara = Student(\"Dara\", 1002354, \"Physics\")\n dara.set_concentration(\"Computer Science\")\n dara.print_student_info()","sub_path":"src/pythonintro/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"166178004","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 7 03:05:40 2020\r\n\r\n@author: akanksha\r\n\"\"\"\r\n\r\nimport os\r\nff = open(\"C:/Users/akanksha/Desktop/IR-assignment-3-data.txt\", \"r+\",encoding=\"utf-8\",errors='ignore')\r\ntt=ff.read()\r\ngd={}\r\nt=[]\r\ny=[]\r\nt=tt.split(\"\\n\")\r\nfor i in t:\r\n y.append(i.split(\" \"))\r\n\r\n#remove '' from the list \r\nfor i in y:\r\n i.remove('')\r\n \r\ny.remove([]) \r\n\r\n\r\n\r\n\r\n \r\n#taking only 75 column \r\nc=0\r\nrelevence_original=[]\r\nqid_75=[]\r\n\r\nfor i in y:\r\n #print(i[76].startswith('75:'))\r\n if i[76].startswith('75:')==True and 
i[1]=='qid:4':\r\n        x=[]\r\n        x.append(i[76].split(\":\"))\r\n        relevence_original.append(i[0])\r\n        qid_75.append(float(x[0][1]))\r\n        c = c + 1\r\n\r\n\r\nqid_75_new = qid_75\r\nqid_75_sorted = sorted(qid_75, reverse=True)\r\n\r\ndf = relevence_original\r\nrelevence_sorted = sorted(df, reverse=True)\r\n\r\n# count the relevant documents: the sorted relevance labels are '1's\r\n# followed by '0's, so stop at the first '0'\r\ng = 0\r\nfor i in relevence_sorted:\r\n    if i == '0':\r\n        break\r\n    else:\r\n        g = g + 1\r\n\r\n# sort on the basis of col 75: pair each (unsorted) score with its\r\n# relevance label, then order the pairs by score\r\nX = relevence_original\r\nY = qid_75_sorted\r\n\r\nZ = [x for _, x in sorted(zip(qid_75_new, X), reverse=True)]\r\nZ = list(map(int, Z))\r\nprint(Z)\r\nprint(Y)\r\n\r\n\r\nx_axis = []\r\nPrecision = []\r\nRecall = []\r\nPrecision.append(1)\r\nRecall.append(0)\r\ndef ret():\r\n    count = 0\r\n    for i in range(0, len(Z)):\r\n        if Z[i] > 0:\r\n            count = count + 1\r\n        p = count / (i + 1)\r\n        r = count / g\r\n        print(\"count:\", count)\r\n        Precision.append(p)\r\n        Recall.append(r)\r\n        x_axis.append(i + 1)\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\ndef main():\r\n    ret()\r\n    print(\"Precision-\", Precision)\r\n    print(\"Recall-\", Recall)\r\n    plt.plot(Recall, Precision, label=\"precision\")\r\n    # plt.plot(, x_axis, label = \"recall\")\r\n    # naming the x axis\r\n    plt.xlabel('Recall')\r\n    # naming the y axis\r\n    plt.ylabel('Precision')\r\n    # giving a title to my graph\r\n    #plt.title('Two lines on same graph!')\r\n\r\n    # function to show the plot\r\n    plt.show()\r\nmain()","sub_path":"que2_c.py","file_name":"que2_c.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"488294443","text":"import unittest\nfrom app.db_class import Comment,Pitch\nfrom app import db\n\nclass commentTest(unittest.TestCase):\n    '''\n    class that tests all functions that relate to comments\n    '''\n    def setUp(self):\n        '''\n        test case that creates pitch and comment objects\n        '''\n        self.new_pitch=Pitch(category='lifestyle',p_title='Life is great',pitch_it='Life is good enjoy it to the fullest',post='saturday',posted_by='pyra',upvote=20,downvote=10)\n        self.new_comment=Comment(id=1,p_comment='Nice work',post_com='2019-3-12',comment_by='pyra',pitch_id=4,PITCHID=4)\n    def tearDown(self):\n        '''\n        test case that deletes any added object after every test\n        '''\n        Pitch.query.delete()\n        Comment.query.delete()\n\n    def test_check_instance(self):\n        '''\n        test case to check instances\n        '''\n        self.assertEqual(self.new_comment.id,1)\n        self.assertEqual(self.new_comment.p_comment,'Nice work')\n        self.assertEqual(self.new_comment.post_com,'2019-3-12')\n        self.assertEqual(self.new_comment.comment_by,'pyra')\n        self.assertEqual(self.new_comment.pitch_id,4)\n        self.assertEqual(self.new_comment.PITCHID,4)\n\n        self.assertEqual(self.new_comment.pitch_id,self.new_pitch.id)\n\n    def test_save(self):\n        '''\n        test case that checks if objects are being saved\n        '''\n        self.new_comment.save_comment()\n        self.assertTrue(len(Comment.query.all()) > 0)\n\n    def test_get_comment(self):\n        '''\n        test case to check if one can get a comment based on pitch_id\n        '''\n        self.new_comment.save_comment()\n        got_comment=Comment.get_comments('4')\n        self.assertTrue(len(got_comment)==4)\n\n\n","sub_path":"tests/test_comment.py","file_name":"test_comment.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"503520948","text":"from geographiclib.geodesic import Geodesic\nimport sys\nimport googlemaps\nimport string\nimport math\napi_key=\"AIzaSyD4ULRyhwhux8lmfESkwICF4ZJSJnlQesI\"\n\n\ngmaps = googlemaps.Client(key=api_key)\n\n# Gets the coordinates of a location given an address (text, lat, long etc)\ndef getCoordinates(gmaps,address):\n    try:\n        result = gmaps.geocode(address)\n        lat = result[0]['geometry']['location']['lat']\n        long = result[0]['geometry']['location']['lng']\n    except Exception:\n        print(\"Something isn't right\")\n        return 'null'  # matches the 'null' checks made in finalmidpoint below\n    return lat,long\n\n#gets the mid point given 2 lat,long coordinates\ndef getMidPoint(c1,c2):\n    lat1, lon1 = c1[0], c1[1]\n    lat2, lon2 = c2[0], c2[1]\n\n    # Compute path from 1 to 2\n    g = Geodesic.WGS84.Inverse(lat1, lon1, lat2, lon2)\n    # Compute midpoint starting at 1\n    h1 = Geodesic.WGS84.Direct(lat1, lon1, g['azi1'], g['s12']/2)\n\n    new_l1= h1['lat2']\n    new_l2= h1['lon2']\n\n    return (new_l1,new_l2)\n\n#converts gps point to a point on a plane\ndef convert_to_plane(c1):\n    lat, lon = c1[0], c1[1]\n\n    #assume lon/lat input\n    #convert to radians\n    radians_lat = lat * math.pi/180\n    radians_lon = lon * math.pi/180\n\n    #convert to x y z coordinates\n    x = math.cos(radians_lon) * math.cos(radians_lat)\n    y = math.sin(radians_lon) * math.cos(radians_lat)\n    z = math.sin(radians_lat)\n\n    return (x,y,z)\n\ndef find_midpoint(list_of_coords):\n    x_total = 0\n    y_total = 0\n    z_total = 0\n\n    #sum of all points\n    for i in range(len(list_of_coords)):\n        x_total = x_total + list_of_coords[i][0]\n        y_total = y_total + list_of_coords[i][1]\n        z_total = z_total + list_of_coords[i][2]\n\n    #divide by total number\n    x_total = x_total/len(list_of_coords)\n    y_total = y_total/len(list_of_coords)\n    z_total = z_total/len(list_of_coords)\n    #averages are calculated by here\n\n    #convert back to radians\n    radians_lon = math.atan2(y_total, x_total)\n    hyp = math.sqrt(x_total * x_total + y_total * y_total)\n    radians_lat = math.atan2(z_total, hyp)\n\n    #convert back to long/lat points\n    lat = radians_lat * 180/math.pi\n    lon = radians_lon * 180/math.pi\n\n    return (lat,lon)\n\n#test examples here for midpoints\n\ndef finalmidpoint(locationtexts):\n    coords = []\n    for location in locationtexts:\n        coords.append(getCoordinates(gmaps,location))\n    convtoplane = []\n    if all(c == 'null' for c in coords):\n        return 'null'\n    for coord in coords:\n        if coord != 'null':\n            convtoplane.append(convert_to_plane(coord))\n    midpoint = find_midpoint(convtoplane)\n    return midpoint\n\n#Returns the midpoint and a list of venue dicts within a radius around the 2 supplied points\ndef getPlacesNear2People(address1,address2,keyword=\"\",name=\"\",type=\"restaurant\",radius=800):\n    gmaps = googlemaps.Client(key=api_key)\n    midPoint = getMidPoint(getCoordinates(gmaps,address1),getCoordinates(gmaps,address2))\n    fields = ['formatted_address','geometry','name','photos']\n    results = gmaps.places_nearby(location=midPoint,keyword=keyword,language=\"en-AU\",min_price=1,max_price=5,name=name,open_now=True,type=type,radius=radius)\n    results = formatResults(results['results'])\n    return midPoint,results\n\n\ndef formatResults(results):\n    resultDics = []\n    i = 0\n    for r in results:\n        dic = {}\n        dic['name'] = r['name']\n        dic['price_level'] = r['price_level']\n        dic['rating'] = r['rating']\n        dic['types']= r['types']\n        dic['coordinates'] = str(r['geometry']['location']['lat']) + ',' + str(r['geometry']['location']['lng'])\n        resultDics.append(dic)\n    return resultDics\n\ndef staticMap(midpoint, coordinates,zoom=\"14\",size=\"600x600\"):\n    baseString = \"https://maps.googleapis.com/maps/api/staticmap?center={}&zoom={}&size={}\".format(midpoint,zoom,size)\n    chars = 
string.ascii_uppercase + string.ascii_uppercase + string.ascii_uppercase\n    colours = ['black', 'brown', 'green', 'purple', 'yellow', 'blue', 'gray', 'orange', 'red', 'white','black', 'brown', 'green', 'purple', 'yellow', 'blue', 'gray', 'orange', 'red', 'white','black', 'brown', 'green', 'purple', 'yellow', 'blue', 'gray', 'orange', 'red', 'white']\n    # 'marker' avoids shadowing the built-in set()\n    for marker in zip(colours,chars,coordinates):\n        addString = \"&markers=color:{}%7Clabel:{}%7C{}\".format(marker[0],marker[1],marker[2])\n        baseString = baseString + addString\n    baseString = baseString + \"&key={}\".format(api_key)\n    return baseString\n\ndef GenerateStaticMap(address1,address2):\n    midpoint,results = getPlacesNear2People(address1,address2)\n    coords = []\n    for r in results:\n        coords.append(r['coordinates'])\n    return staticMap(midpoint,coords)\n","sub_path":"api/midpoint.py","file_name":"midpoint.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"70421414","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nimport os, sys\nimport random\nfrom tqdm import tqdm\nfrom torch.nn.utils.rnn import (pad_packed_sequence,\n                                pad_sequence,\n                                pack_padded_sequence,\n                                pack_sequence)\nfrom preprocess import int_to_str, str_to_int, append_file\nfrom torchvision import transforms\nimport Levenshtein\n\nSOS = 0\nEOS = 0\n\nargs = {}\n\nargs[\"train_subsample\"] = 4\nargs[\"val_subsample\"] = -1\nargs[\"test_subsample\"] = -1\nargs[\"batch_size\"] = 2\nargs[\"lr\"] = 1e-3\nargs[\"random_sample\"] = 20\nargs[\"epochs\"] = 15\nargs[\"teacher_force\"] = 0.8\nargs[\"drop_out\"] = 0.5\n\n# rather fixed\nargs[\"max_step\"] = 250\nargs[\"num_workers\"] = 4\nargs[\"device\"] = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nargs[\"vocab_size\"] = 58\n\n# model hyper parameters\nargs[\"image_embed_size\"] = 2048\nargs[\"cnn_output_size\"] = 512\nargs[\"lstm_hidden_size\"] = 512\nargs[\"char_embed_size\"] = 256\n\nclass CustomDataset(Dataset):\n    def __init__(self, data, label=None, transform=None):\n        self._data = data\n        self._label = label\n        self.transform = transform\n\n    def __len__(self):\n        return len(self._data)\n\n    def __getitem__(self, index):\n        global args\n        # prepare data\n        d = self._data[index]\n        d = torch.from_numpy(d).float().unsqueeze(0) # (1, H, W)\n        if self.transform is not None:\n            d = self.transform(d)\n\n        # prepare label\n        l = torch.tensor([0])\n        if self._label is not None:\n            l = self._label[index]\n            l = np.append(l, 0) # append <EOS> (index 0) to the label\n            l = torch.from_numpy(l).long() # convert the extended label, not the raw one\n        return (d, l)\n\ndef collate_lines(batch):\n    \"\"\"\n    @Param:\n    batch: list of tensor tuple: (data, label) of len B\n        data of (1, H, W), label of (L, )\n    @Return \n    data : (B, 1, H, W) (channel is 1) \n    target : padded_seq (B, L)\n    target_len : tensor (B, )\n    \"\"\"\n    batch = sorted(batch, key = lambda x: len(x[0]), reverse = True)\n    data = [b[0].unsqueeze(0) for b in batch] # B of (1, 1, H, W)\n    data = torch.cat(data, dim = 0) #(B, 1, H, W)\n    target = [b[1] for b in batch] # B of (L, )\n    target_len = torch.tensor([len(t) for t in target])\n    target = pad_sequence(target, batch_first = True)\n    return data, target, target_len\n\ndef load_data():\n    global args\n    data = np.load('data/data.npy')\n    findings = np.load('data/findings.npy')\n    indications = np.load('data/indications.npy')\n    impressions = np.load('data/impressions.npy')\n\n    none_index 
= []\n # TODO: use findings \n for i, f in enumerate(findings):\n if len(f) == 0:\n none_index.append(i)\n full_index = set(np.arange(len(findings)))\n\n # train, dev, test split\n idx = list(full_index - set(none_index))\n np.random.shuffle(idx)\n total = len(idx)\n train_idx, dev_idx, test_idx = idx[:int(total*0.8)],idx[int(total*0.8):int(total*0.9)],idx[int(total*0.9):] # 8 : 1 : 1\n \n train_x, train_y= (data[train_idx][:args[\"train_subsample\"]], \n findings[train_idx][:args[\"train_subsample\"]])\n dev_x, dev_y = (data[dev_idx][:args[\"val_subsample\"]],\n findings[dev_idx][:args[\"val_subsample\"]])\n test_x, test_y = (data[test_idx][:args[\"test_subsample\"]],\n findings[test_idx][:args[\"test_subsample\"]])\n\n transform = transforms.Compose([\n transforms.ToPILImage(),\n transforms.RandomRotation(15),\n transforms.RandomVerticalFlip(),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor()\n ])\n\n train_set = CustomDataset(train_x, train_y, transform=transform)\n val_set = CustomDataset(dev_x, dev_y)\n test_set = CustomDataset(test_x, test_y)\n return (DataLoader( dataset=train_set, \n batch_size=args[\"batch_size\"],\n shuffle=True,\n collate_fn=collate_lines), \n DataLoader( dataset=val_set,\n batch_size=args[\"batch_size\"],\n shuffle=False,\n collate_fn=collate_lines),\n DataLoader( dataset=test_set,\n batch_size=args[\"batch_size\"],\n shuffle=False,\n collate_fn=collate_lines))\n\ndef conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False) \n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.elu = nn.ELU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.stride = stride\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.elu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n out += residual\n out = self.elu(out)\n\n return out\n\nclass ResNet(nn.Module):\n \"\"\"\n tunable hyper parameters: embeddings\n \"\"\"\n def __init__(self):\n global args \n super(ResNet, self).__init__()\n self.network = nn.Sequential(\n nn.Conv2d(1,32,kernel_size = 5,padding = 0,stride = 2,bias = False),\n nn.ELU(inplace=True),\n BasicBlock(32,32), \n nn.Conv2d(32,64,kernel_size = 5,padding = 0,stride = 2,bias = False),\n nn.ELU(inplace=True),\n BasicBlock(64,64), \n nn.Conv2d(64,128,kernel_size = 5,padding = 0,stride = 2,bias = False),\n nn.ELU(inplace=True),\n BasicBlock(128,128), \n nn.Conv2d(128,512,kernel_size = 5,padding = 0,stride = 2,bias = False),\n nn.ELU(inplace=True),\n BasicBlock(512,512),\n nn.AdaptiveAvgPool2d((2,2))\n )\n self.fc = nn.Linear(args[\"image_embed_size\"], args['cnn_output_size'], bias = False) \n self.bn = nn.BatchNorm1d(args[\"cnn_output_size\"])\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n out = self.network(x)\n out = out.view(out.size(0), -1) #(B,-1)\n out = self.fc(out) #(B, cnn_output_size)\n out = self.bn(out)\n return out\n\nclass LockedDropout(nn.Module):\n def __init__(self, p=args[\"drop_out\"]):\n self.p = p\n super().__init__()\n\n def forward(self, x):\n \"\"\"\n Args:\n x 
(:class:`torch.FloatTensor` [batch size, (sequence length, ) rnn hidden size]):\n \"\"\"\n if not self.training or not self.p:\n return x\n if isinstance(x, torch.Tensor): \n seq = x\n elif isinstance(x, torch.nn.utils.rnn.PackedSequence):\n seq, seq_len = pad_packed_sequence(x, batch_first = True)\n seq = seq.clone()\n if(len(seq.size()) == 2): # lstm cell\n mask = seq.new_empty(1, seq.size(1), requires_grad=False).bernoulli_(1 - self.p)\n elif(len(seq.size()) == 3):\n mask = seq.new_empty(1, seq.size(1), seq.size(2), requires_grad=False).bernoulli_(1 - self.p)\n mask = mask.div_(1 - self.p)\n mask = mask.expand_as(seq)\n res = seq * mask\n if isinstance(x, torch.Tensor):\n return res\n elif isinstance(x, torch.nn.utils.rnn.PackedSequence):\n return pack_padded_sequence(res, seq_len, batch_first = True)\n\nclass XrayNet(nn.Module):\n def __init__(self):\n global args \n super(XrayNet, self).__init__()\n vocab_size = args[\"vocab_size\"]\n embed_size = args[\"char_embed_size\"]\n hidden_size = args[\"lstm_hidden_size\"]\n cnn_output_size = args[\"cnn_output_size\"]\n self.embedding = nn.Embedding(vocab_size, embed_size)\n self.lstmcell = nn.LSTMCell(cnn_output_size + embed_size, hidden_size)\n self.dropout1 = LockedDropout()\n self.lstmcell2 = nn.LSTMCell(hidden_size, hidden_size)\n self.dropout2 = LockedDropout()\n self.logsoftmax = nn.LogSoftmax(dim = 1)\n self.character_distribution = nn.Linear(hidden_size, vocab_size) # Projection layer\n \n def forward_step(self, input_step, cnn_output, hidden_cell_state, hidden_cell_state2): \n \"\"\"\n @Param: \n input_step: (B, ) input chars\n cnn_output: (B, cnn_output_size)\n hidden_cell_state: (hidden_state, cell_state), both (B, lstm_hidden_size)\n hidden_cell_state2:(hidden_state2, cell_state2), both (B, lstm_hidden_size)\n @Return:\n raw_pred: (B, vocab_size)\n hidden_state, cell_state (B, lstm_hidden_size):\n hidden_state2, cell_state2 (B, lstm_hidden_size):\n \"\"\"\n embed = self.embedding(input_step) # (B, char_embed_size)\n embed = torch.cat((cnn_output, embed), dim = 1) #(B, char_embed_size + cnn_output_size)\n hidden_state, cell_state = self.lstmcell(embed, hidden_cell_state) #(B, H)\n hidden_state = self.dropout1(hidden_state)\n hidden_state2, cell_state2 = self.lstmcell2(hidden_state, hidden_cell_state2) #(B, H)\n hidden_state2 = self.dropout2(hidden_state2)\n raw_pred = self.logsoftmax(self.character_distribution(hidden_state2)) #(B, V)\n return raw_pred, (hidden_state, cell_state), (hidden_state2, cell_state2)\n \n def forward(self, cnn_output, mode, \n ground_truth = None):\n \"\"\"\n @Param:\n cnn_output: (B, H)\n mode: \"train\", \"test\" or \"val\"\n ground_truth : padded seq of (B, L)\n @Return:\n raw_pred_seq : prob dist of each char (B, L, vocab_size) \n output_seq : list of B of (L, ), string for each batch\n ttl_score : list of B of scalar, cumulative score for each batch\n \"\"\"\n if mode == \"train\":\n max_step = ground_truth.size(1)\n else:\n max_step = args[\"max_step\"]\n batch_size = cnn_output.size(0)\n\n raw_pred_seq = []\n output_seq = []\n all_score = []\n\n # initialize hidden state\n hidden_cell_state = None\n hidden_cell_state2 = None\n \n # initialize first char as \n input_step = torch.tensor([SOS] * batch_size).to(args[\"device\"])\n\n for step in range(max_step):\n raw_pred, hidden_cell_state, hidden_cell_state2 = (\n self.forward_step(input_step, cnn_output, \n hidden_cell_state, hidden_cell_state2))\n\n if mode == \"train\":\n raw_pred_seq.append(raw_pred.unsqueeze(1)) #(B, 1, vocab_size)\n\n # 
generate output\n # greedy\n if mode == \"train\" or mode == \"val\":\n output = raw_pred.max(dim = 1)[1] # argmax (B, )\n if mode == \"val\":\n # print(\"raw_pred, output: \", raw_pred, output)\n output_seq.append(output.unsqueeze(1).cpu().detach()) #(B, 1)\n all_score.append(torch.gather(raw_pred, 1, output.view(-1, 1))) #(B, 1)\n # random\n else:\n dist = torch.distributions.Categorical(logits=raw_pred)\n output = dist.sample() #(B, )\n output_seq.append(output.unsqueeze(1).cpu().detach()) #(B, 1)\n all_score.append(torch.gather(raw_pred, 1, output.view(-1, 1))) #(B, 1)\n \n if mode == \"train\" and np.random.rand() < args[\"teacher_force\"]:\n input_step = ground_truth[:,step] \n else:\n input_step = output\n\n if mode == \"train\":\n raw_pred_seq = torch.cat(raw_pred_seq, dim=1) #(B, L, vocab_size)\n if mode == \"val\" or mode == \"test\": # calculate loss and each output length\n output_seq = torch.cat(output_seq, dim=1) #(B, L)\n all_score = torch.cat(all_score, dim=1) #(B, L)\n # print(\"output_seq: \", output_seq)\n output_fixed = []\n score_fixed = []\n for output, score in zip(output_seq, all_score):\n idx = (output == EOS).nonzero()\n if len(idx) == 0: # no contained, until final\n output_fixed.append(output)\n score_fixed.append(score.mean().item())\n else: # \n output_fixed.append(output[:idx[0] + 1])\n score_fixed.append(score[:idx[0] + 1].mean().item())\n\n if mode == \"train\":\n return raw_pred_seq, None, None\n else:\n return None, output_fixed, score_fixed\n\ndef train(epoch, cnn, lstm, train_loader, optimizer, criterion):\n global args\n cnn, lstm = cnn.train(), lstm.train()\n ttl_perplexity = 0\n ttl_loss = 0\n for batch_id, (inputs, targets, target_len) in tqdm(enumerate(train_loader)):\n inputs, targets = inputs.to(args[\"device\"]), targets.to(args[\"device\"])\n \n # Input shape: (B, C, H, W) = (B, 1, 512, 512)\n cnn_out = cnn(inputs) # (B, cnn_output_size)\n # TODO:\n raw_pred_seq, _, _ = lstm(cnn_out, mode = \"train\", ground_truth = targets)\n\n # mask the padding part of generated seq to be -1 and ignore for loss\n comp_range = torch.arange(target_len.max().item()).unsqueeze(0)\n transript_mask = target_len.unsqueeze(1)\n transript_mask = (transript_mask <= comp_range).to(args[\"device\"])\n targets_masked = targets.clone()\n targets_masked[transript_mask] = -1\n \n # backward pass\n loss = criterion(raw_pred_seq.view(-1, args[\"vocab_size\"]),\n targets_masked.view(-1))\n perplexity = (loss / ((1 - transript_mask).sum()).float()).exp().item()\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n ttl_loss = ttl_loss + loss.item()/args[\"batch_size\"] # per utter loss\n ttl_perplexity = ttl_perplexity + perplexity # exp loss per char\n if batch_id > 0 and batch_id % 50 == 0:\n print(\"[Epoch{} batch {}] loss: {} perplexity: {}\".format(\n epoch, batch_id, \n ttl_loss/(batch_id +1), ttl_perplexity/(batch_id + 1)))\n return ttl_loss/(batch_id + 1), ttl_perplexity/(batch_id + 1)\n\ndef validation(cnn, lstm, dev_loader):\n global args\n cnn, lstm = cnn.eval(), lstm.eval()\n ttl_dist = 0\n with torch.no_grad():\n for batch_id, (inputs, targets, _) in tqdm(enumerate(dev_loader)):\n inputs = inputs.to(args[\"device\"])\n cnn_out = cnn(inputs)\n _, output_seq, _ = lstm(cnn_out, mode = \"val\")\n\n # translate to string\n output_seq = [int_to_str(out.numpy()) for out in output_seq]\n targets_seq = [int_to_str(tar.numpy()) for tar in targets]\n # comp distance\n dist = [Levenshtein.distance(out, tar) for out, tar in zip(output_seq, targets_seq)]\n ttl_dist += 
np.mean(dist)\n print(\"[Validation] pred sample: {}, target: {}\".format(output_seq, targets_seq))\n return ttl_dist/ (batch_id + 1)\n\ndef test(cnn, lstm, test_loader):\n global args\n cnn, lstm = cnn.eval(), lstm.eval()\n predictions = []\n\n with torch.no_grad():\n for batch_id, (inputs, targets, _) in tqdm(enumerate(test_loader)):\n inputs = inputs.to(args[\"device\"])\n cnn_out = cnn(inputs)\n\n # use greedy search as best for now\n _, best_outs, best_scores = lstm(cnn_out, mode = \"val\")\n\n # use random search\n for i in range(args[\"random_sample\"]):\n _, outs, scores = lstm(cnn_out, mode = \"test\")\n for idx, (out, score) in enumerate(zip(outs, scores)):\n if score > best_scores[idx]:\n best_outs[idx] = out\n best_scores[idx] = score\n predictions.extend(list(map(int_to_str, predictions)))\n return predictions\n\nif __name__ == \"__main__\":\n train_loader, val_loader, test_loader = load_data()\n cnn = ResNet().to(args[\"device\"])\n lstm = XrayNet().to(args[\"device\"])\n # cnn = torch.load(\"saved_models/cnn_1.pt\", map_location = args[\"device\"])\n # lstm = torch.load(\"saved_models/lstm_1.py\", map_location = args[\"device\"])\n\n optimizer = torch.optim.Adam([{'params':cnn.parameters()}, \n {'params':lstm.parameters()}],\n lr = args[\"lr\"])\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, \n mode=\"min\", factor=0.1, patience=3,\n min_lr=1e-6)\n criterion = nn.NLLLoss(reduction=\"sum\", ignore_index=-1).to(args[\"device\"])\n best_dist = float(\"inf\")\n\n for epoch in range(args[\"epochs\"]):\n # train\n epoch_loss, epoch_perplexity = (\n train(epoch, cnn, lstm, train_loader, optimizer, criterion))\n print(\"[Epoch {}] loss: {}, perplexity: {}\".format(\n epoch, epoch_loss, epoch_perplexity))\n append_file(\"train_out.csv\", epoch_loss, epoch_perplexity)\n\n # val\n dist = validation(cnn, lstm, val_loader)\n print(\"[Validation] Levenshtein distance:\", dist)\n append_file(\"val_out.csv\", dist)\n \n # step lr\n scheduler.step(dist)\n if dist < best_dist:\n print(\"crt: {}, best: {}, saving...\".format(dist, best_dist))\n best_dist = dist\n torch.save(cnn, \"saved_models/cnn_{}.pt\".format(epoch))\n torch.save(lstm, \"saved_models/lstm_{}.pt\".format(epoch))\n test(cnn, lstm, test_loader)\n","sub_path":"report/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"248311442","text":"# -*- encoding: utf-8 -*- \n\"\"\"\n@Author : zYx.Tom\n@Contact : 526614962@qq.com\n@site : https://zhuyuanxiang.github.io\n---------------------------\n@Software : PyCharm\n@Project : introduction_to_ml_with_python\n@File : C0405_auto_select_feature.py\n@Version : v0.1\n@Time : 2019-10-10 09:55\n@License : (C)Copyright 2018-2019, zYx.Tom\n@Reference : 《Python机器学习基础教程》, Sec0405,P181\n@Desc : 数据表示与特征工程。自动选择特征。\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# 设置数据显示的精确度为小数点后3位\nnp.set_printoptions(precision = 3, suppress = True, threshold = np.inf, linewidth = 200)\n\n\n# 4.5. 自动化特征选择(基于特征的影响力)\n# 4.5.1. 
+{"seq_id":"248311442","text":"# -*- encoding: utf-8 -*- \n\"\"\"\n@Author : zYx.Tom\n@Contact : 526614962@qq.com\n@site : https://zhuyuanxiang.github.io\n---------------------------\n@Software : PyCharm\n@Project : introduction_to_ml_with_python\n@File : C0405_auto_select_feature.py\n@Version : v0.1\n@Time : 2019-10-10 09:55\n@License : (C)Copyright 2018-2019, zYx.Tom\n@Reference : \"Introduction to Machine Learning with Python\" (《Python机器学习基础教程》), Sec0405, P181\n@Desc : Representing data and feature engineering. Automatic feature selection.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# show numbers with a precision of 3 digits after the decimal point\nnp.set_printoptions(precision = 3, suppress = True, threshold = np.inf, linewidth = 200)\n\n\n# 4.5. Automatic feature selection (based on each feature's influence)\n# 4.5.1. Univariate statistics (test whether each feature has a statistically significant relation to the target, then keep the features with the highest confidence)\n# For classification this is also known as analysis of variance (ANOVA); the key property is that it is univariate, i.e. each feature is considered on its own\n# Fast to compute, needs no model to be built, and is completely independent of the model applied after feature selection\ndef univariate_statistics():\n    from sklearn.datasets import load_breast_cancer\n    cancer = load_breast_cancer()\n    rng = np.random.RandomState(42)\n    noise = rng.normal(size = (len(cancer.data), 50))\n    # add noise features to the data (the first 30 features come from the dataset, the next 50 are noise)\n    X_w_noise = np.hstack([cancer.data, noise])\n\n    # use half of the noisy data as the training set\n    from sklearn.model_selection import train_test_split\n    X_train, X_test, y_train, y_test = train_test_split(X_w_noise, cancer.target, random_state = 0, test_size = .5)\n    # select a subset of the noisy data's features as training features\n    # fewer features tend to give higher accuracy (probably because less noise interferes)\n    fig, axes = plt.subplots(4, 1, figsize = (15, 9))\n    plt.suptitle(\"Figure 4-9: Features selected by SelectPercentile\")\n    # In scikit-learn:\n    # score_func=f_classif (the default) is for classification problems;\n    # score_func=f_regression is for regression problems\n    # SelectPercentile(score_func = ?, percentile =): keeps a fixed percentage of the features\n    # SelectKBest(score_func = ?, k =): keeps a fixed number of features\n    from sklearn.feature_selection import SelectPercentile\n    for ax, percentile in zip(axes.ravel(), [10, 25, 50, 75]):\n        select = SelectPercentile(percentile = percentile)\n        select.fit(X_train, y_train)\n        X_train_selected = select.transform(X_train)\n        print('-' * 20)\n        print(\"percentile = {}\".format(percentile))\n        print('X_train.shape: {}'.format(X_train.shape))\n        print('X_train_selected.shape: {}'.format(X_train_selected.shape))\n\n        # find out which features were selected\n        mask = select.get_support()\n        # print(mask)\n\n        # visualize the selected features; some original features of the noisy data are dropped during selection\n        ax.matshow(mask.reshape(1, -1), cmap = 'gray_r')\n        ax.set_xlabel(\"Sample Index\\nnumber of features = {}\".format(X_train_selected.shape[1]))\n\n        # dropping features can actually improve performance, even after losing some of the original features.\n        from sklearn.linear_model import LogisticRegression\n\n        X_test_selected = select.transform(X_test)\n        lr = LogisticRegression(solver = 'lbfgs', max_iter = 10000)\n        lr.fit(X_train, y_train)\n        print('Score with all features: {:.3f}'.format(lr.score(X_test, y_test)))\n        lr.fit(X_train_selected, y_train)\n        print('Score with only selected features: {:.3f}'.format(lr.score(X_test_selected, y_test)))\n    pass\n\n\n
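A minimal sketch of the univariate scoring that SelectPercentile builds on: scikit-learn's f_classif returns an F statistic and a p-value for every feature, so informative features can be ranked directly. The dataset matches the one used above; the "top 5" cut is an illustrative choice.

import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.feature_selection import f_classif

X, y = load_breast_cancer(return_X_y=True)
F, p = f_classif(X, y)          # one F statistic / p-value per feature
ranked = np.argsort(F)[::-1]    # feature indices by decreasing F score
print("top 5 features by F score:", ranked[:5])
print("their p-values:", p[ranked[:5]])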
# 4.5.2. Model-based feature selection (use a supervised machine-learning model to judge each feature's importance and keep only the most important ones)\ndef select_features_based_on_model():\n    from sklearn.datasets import load_breast_cancer\n    cancer = load_breast_cancer()\n    rng = np.random.RandomState(42)\n    noise = rng.normal(size = (len(cancer.data), 50))\n    # add noise features to the data (the first 30 features come from the dataset, the next 50 are noise)\n    X_w_noise = np.hstack([cancer.data, noise])\n\n    # use half of the noisy data as the training set\n    from sklearn.model_selection import train_test_split\n    X_train, X_test, y_train, y_test = train_test_split(X_w_noise, cancer.target, random_state = 0, test_size = .5)\n\n    # select a subset of the noisy data's features as training features\n    # fewer features tend to give higher accuracy (probably because less noise interferes)\n    fig, axes = plt.subplots(4, 1, figsize = (15, 9))\n    plt.suptitle(\"Figure 4-10: Features selected by SelectFromModel with RandomForestClassifier\")\n    # use the SelectFromModel transformer to pick the desired features out of a model\n    from sklearn.feature_selection import SelectFromModel\n    from sklearn.ensemble import RandomForestClassifier\n    from sklearn.linear_model import LogisticRegression\n    # Different selector models learn differently and therefore pick different features:\n    # a random forest makes better use of the global relations in the data,\n    # while logistic regression can only use each feature's own relation to the target,\n    # so after thorough training useful features may score low and get dropped, leaving the final model worse\n    # ToDo: when the feature-selection model is the same as the model that learns the data, I think the main purpose of this design is dimensionality reduction.\n    for ax, feature_number in zip(axes.ravel(), [8, 20, 40, 60]):\n        # select = SelectFromModel(RandomForestClassifier(n_estimators = 100, random_state = 42), threshold = 'median')\n        # select = SelectFromModel(RandomForestClassifier(n_estimators = 100, random_state = 42),\n        #                          max_features = feature_number, threshold = -np.inf)\n\n        # Note: compare the two runs below; the features selected by a fully trained logistic regression make the final model worse\n        # select = SelectFromModel(LogisticRegression(solver = 'lbfgs',random_state = 42),\n        #                          max_features = feature_number, threshold = -np.inf)\n        select = SelectFromModel(LogisticRegression(solver = 'lbfgs', max_iter = 10000, random_state = 42),\n                                 max_features = feature_number, threshold = -np.inf)\n        select.fit(X_train, y_train)\n        X_train_l1 = select.transform(X_train)\n        X_test_l1 = select.transform(X_test)\n        print('-' * 20)\n        print('X_train.shape: {}'.format(X_train.shape))\n        print('X_train_l1.shape: {}'.format(X_train_l1.shape))\n\n        # find out which features were selected\n        mask = select.get_support()\n        # print(mask)\n\n        # visualize the selected features; some original features of the noisy data are dropped during selection\n        ax.matshow(mask.reshape(1, -1), cmap = 'gray_r')\n        ax.set_xlabel('Sample Index\\nnumber of features = {}'.format(feature_number))\n\n        # dropping features can actually improve performance, even after losing some of the original features.\n        lr = LogisticRegression(solver = 'lbfgs', max_iter = 10000)\n        lr.fit(X_train, y_train)\n        print('Score with all features: {:.3f}'.format(lr.score(X_test, y_test)))\n        lr.fit(X_train_l1, y_train)\n        print('Score with only selected features: {:.3f}'.format(lr.score(X_test_l1, y_test)))\n    # model-based selection picks better-quality features, and accuracy stays good even when more features are kept\n    pass\n\n\n
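SelectFromModel keeps every feature whose importance exceeds a threshold; with threshold='median', exactly half the features survive. A minimal sketch of that mechanism, using the RandomForest variant that the function above keeps commented out (dataset as above, no noise features):

from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel

X, y = load_breast_cancer(return_X_y=True)
select = SelectFromModel(
    RandomForestClassifier(n_estimators=100, random_state=42),
    threshold="median")          # keep features above the median importance
select.fit(X, y)
print("kept", select.transform(X).shape[1], "of", X.shape[1], "features")
print(select.get_support())      # boolean mask over the original features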
# 4.5.3. Iterative feature selection: build a series of models, each using a different number of features\n# Recursive Feature Elimination (RFE): start with all features, build a model, discard the least important feature according to the model, and repeat until a stopping condition is met.\n# Rather slow, and the results are only moderate.\ndef iterative_selection():\n    from sklearn.datasets import load_breast_cancer\n    cancer = load_breast_cancer()\n    rng = np.random.RandomState(42)\n    noise = rng.normal(size = (len(cancer.data), 50))\n    # add noise features to the data (the first 30 features come from the dataset, the next 50 are noise)\n    X_w_noise = np.hstack([cancer.data, noise])\n\n    # use half of the noisy data as the training set\n    from sklearn.model_selection import train_test_split\n    X_train, X_test, y_train, y_test = train_test_split(X_w_noise, cancer.target, random_state = 0, test_size = .5)\n\n    # select a subset of the noisy data's features as training features\n    # fewer features tend to give higher accuracy (probably because less noise interferes)\n    fig, axes = plt.subplots(4, 1, figsize = (15, 9))\n    plt.suptitle(\"Figure 4-11: Features selected by recursive feature elimination with a random forest classifier\")\n    from sklearn.feature_selection import RFE\n    from sklearn.ensemble import RandomForestClassifier\n    from sklearn.linear_model import LogisticRegression\n    for ax, feature_number in zip(axes.ravel(), [8, 20, 40, 60]):\n        # select = RFE(RandomForestClassifier(n_estimators = 100, random_state = 42),\n        #              n_features_to_select = feature_number)\n\n        # Note: compare the two runs below; the features selected by a fully trained logistic regression make the final model worse\n        # ToDo: with insufficient training, the model's score improves markedly once 40 features are selected (interesting) - why?\n        select = RFE(LogisticRegression(solver = 'lbfgs', random_state = 42),\n                     n_features_to_select = feature_number)\n        # select = RFE(LogisticRegression(solver = 'lbfgs', max_iter = 10000, random_state = 42),\n        #              n_features_to_select = feature_number)\n        select.fit(X_train, y_train)\n        X_train_rfe = select.transform(X_train)\n        X_test_rfe = select.transform(X_test)\n        print('-' * 20)\n        print('X_train.shape: {}'.format(X_train.shape))\n        print('X_train_rfe.shape: {}'.format(X_train_rfe.shape))\n\n        # find out which features were selected\n        mask = select.get_support()\n        # print(mask)\n\n        # visualize the selected features; some original features of the noisy data are dropped during selection\n        ax.matshow(mask.reshape(1, -1), cmap = 'gray_r')\n        ax.set_xlabel('Sample Index\\nnumber of features = {}'.format(feature_number))\n\n        # dropping features can actually improve performance, even after losing some of the original features.\n        lr = LogisticRegression(solver = 'lbfgs', max_iter = 10000)\n        lr.fit(X_train, y_train)\n        print('Score with all features: {:.3f}'.format(lr.score(X_test, y_test)))\n        lr.fit(X_train_rfe, y_train)\n        print('Score with only selected features: {:.3f}'.format(lr.score(X_test_rfe, y_test)))\n\n\n
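Besides the boolean mask, scikit-learn's RFE also exposes an elimination ranking (1 = kept; larger numbers were dropped earlier), which makes the iterative behaviour visible. A minimal sketch; the estimator mirrors the one used above and the feature count is an illustrative choice.

from sklearn.datasets import load_breast_cancer
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression

X, y = load_breast_cancer(return_X_y=True)
select = RFE(LogisticRegression(solver="lbfgs", max_iter=10000),
             n_features_to_select=10)   # default step drops one feature per round
select.fit(X, y)
print(select.support_)    # True for the 10 surviving features
print(select.ranking_)    # 1 for kept features, higher = eliminated earlier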
if __name__ == \"__main__\":\n    \"\"\"Comparing the plots of the three automatic feature-selection methods shows that having the most original features does not necessarily give the highest accuracy;\n    presumably some of the original features are noise and contribute nothing to accuracy.\"\"\"\n    # 4.5.1. Univariate statistics (test whether each feature has a statistically significant relation to the target, then keep the features with the highest confidence)\n    # univariate_statistics()\n\n    # 4.5.2. Model-based feature selection (use a supervised machine-learning model to judge each feature's importance and keep only the most important ones)\n    # select_features_based_on_model()\n\n    # 4.5.3. Iterative feature selection: build a series of models, each using a different number of features\n    iterative_selection()\n    import winsound\n\n    # beep to signal that the run has finished\n    winsound.Beep(600, 500)\n    if len(plt.get_fignums()) != 0:\n        plt.show()\n    pass\n","sub_path":"C04/C0405_auto_select_feature.py","file_name":"C0405_auto_select_feature.py","file_ext":"py","file_size_in_byte":11490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
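All three selectors in the file above share the fit/transform interface, so they can be dropped into a Pipeline to keep the selection step inside cross-validation. This pattern is not taken from the file itself; it is a standard scikit-learn idiom, and the percentile is an illustrative choice.

from sklearn.datasets import load_breast_cancer
from sklearn.feature_selection import SelectPercentile
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline

X, y = load_breast_cancer(return_X_y=True)
pipe = make_pipeline(
    SelectPercentile(percentile=50),   # selection is refit inside every CV fold
    LogisticRegression(solver="lbfgs", max_iter=10000))
print(cross_val_score(pipe, X, y, cv=5).mean())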
+{"seq_id":"390921556","text":"from functools import reduce\n\nname = None\nteacher = None\naverageScore = None\n\ndef Name(_name):\n    return (_name)\n\ndef Teacher(_Teacher):\n    return (_Teacher)\n\ndef ScoreAvg(_Scores):\n    # use 'total' to avoid shadowing the built-in sum()\n    total = reduce((lambda x,y: int(x)+int(y)), _Scores)\n    return total / len(_Scores)\n\ndef ScoreResult(_AverageScore):\n    if _AverageScore >= 8:\n        return (\"Well done %s, %s is pleased with your effort\" % (name, teacher))\n    elif _AverageScore >= 6 and _AverageScore < 8:\n        return (\"A good effort, %s, try harder next time\" % name)\n    else:  # covers every score below 6 (the old 'elif <= 5' missed scores between 5 and 6)\n        return (\"%s, This is poor effort, try harder\" % name)\n\nname = Name(input(\"What is your name? - \"))\nteacher = Teacher(input(\"Teachers name? - \"))\n\nvalues = input(\"enter 4 test scores separated by commas - \")\nvalues = values.split(',')\naverageScore = ScoreAvg(values)\nprint(\"Average Score: %s\" % averageScore)\n\nprint(ScoreResult(averageScore))","sub_path":"IL/Task 4.8.1/Task 4.8.1.py","file_name":"Task 4.8.1.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"383610895","text":"# import matplotlib as mpl\r\n# mpl.use('Qt5Agg')\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom math import log, exp\r\nimport numpy as np\r\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\r\n\r\n\r\ndef decay(half_life, curr_nuclii, max_time, timestep, current_time, original_nuclii, rad_list=None):\r\n    # use None instead of a mutable default list, which would be shared across calls\r\n    if rad_list is None:\r\n        rad_list = []\r\n    decay_const = log(0.5) / half_life\r\n    if current_time <= max_time:\r\n        remaining_nuclii = curr_nuclii * exp(decay_const * timestep)\r\n        rad_list.append((remaining_nuclii / original_nuclii))\r\n        return decay(half_life=half_life, curr_nuclii=remaining_nuclii, max_time=max_time, timestep=timestep,\r\n                     current_time=current_time + timestep, original_nuclii=original_nuclii, rad_list=rad_list)\r\n    else:\r\n        return rad_list\r\n\r\n\r\n\r\n\r\nhf_half_life = 8.9 * 10**6\r\nal_half_life = 7.17 * 10**5\r\nfe_half_life = 3.0 * 10**5\r\nw_182_w_184_terrestrial = 0.864900 # Kleine & Walker 2017 Tungsten Isotopes in Planets\r\nw_182_w_184_terrestrial_old = 0.864680 # Kleine et al. 
2002 Eucrites\r\nmax_time = 100 * 10**6\r\noriginal_hf = 100\r\noriginal_al = 100\r\noriginal_fe = 100\r\ntimestep = 1 * 10**6\r\ntime_list = [i / (1 * 10**6) for i in np.arange(0, max_time + timestep, timestep)]\r\n\r\nhf_decay = decay(half_life=hf_half_life, curr_nuclii=original_hf, max_time=max_time, timestep=timestep,\r\n current_time=timestep, original_nuclii=original_hf, rad_list=[original_hf / original_hf])\r\nal_decay = decay(half_life=al_half_life, curr_nuclii=original_al, max_time=max_time, timestep=timestep,\r\n current_time=timestep, original_nuclii=original_al, rad_list=[original_al / original_al])\r\nfe_decay = decay(half_life=fe_half_life, curr_nuclii=original_fe, max_time=max_time, timestep=timestep,\r\n current_time=timestep, original_nuclii=original_fe, rad_list=[original_fe / original_fe])\r\n\r\nw_abundance = [1 - i for i in hf_decay]\r\n\r\n\r\nmy_5_index = time_list.index(5)\r\nhf_at_5 = hf_decay[my_5_index]\r\nal_at_5 = al_decay[my_5_index]\r\nfe_at_5 = fe_decay[my_5_index]\r\n\r\neucrite_df = pd.read_excel(\"eucrites_kleine_2002.xlsx\")\r\n\r\nsample_name_list = []\r\nw_182_w_184_list = []\r\nhf_180_w_184_list = []\r\nepsilon_w_list = []\r\n\r\nfor row in eucrite_df.index:\r\n sample_name = eucrite_df['Sample'][row]\r\n w_182_w_184 = eucrite_df['182W/184W'][row]\r\n hf_180_w_184 = eucrite_df['180Hf/184W'][row]\r\n epsilon_w = eucrite_df['epsilon_W'][row]\r\n\r\n w_182_w_184_time = [i * float(w_182_w_184) for i in w_abundance]\r\n epsilon_w_time = [((i / w_182_w_184_terrestrial_old) - 1) * (10**4) for i in w_182_w_184_time]\r\n\r\n sample_name_list.append(sample_name)\r\n w_182_w_184_list.append(w_182_w_184_time)\r\n epsilon_w_list.append(epsilon_w_time)\r\n","sub_path":"Thesis_Code_1/model_integration_radioactivity.py","file_name":"model_integration_radioactivity.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"131901794","text":"import pyglet\nimport pymunk\nfrom pymunk.body import Body\n\nimport resources\nimport random\nfrom map_objects.map_entity import MapEntity\nfrom resources import segment_height, segment_width\nfrom pyglet.gl import *\n\n\nclass BlockPlatform(MapEntity):\n BLOCK_PLATFORM_LTOP = pyglet.resource.image(\"tile-1s.png\")\n BLOCK_PLATFORM_TOP = pyglet.resource.image('tile-2s.png')\n BLOCK_PLATFORM_RTOP = pyglet.resource.image('tile-3s.png')\n BLOCK_PLATFORM_LE = pyglet.resource.image('tile-4s.png')\n BLOCK_PLATFORM_F = pyglet.resource.image('tile-5s.png')\n BLOCK_PLATFORM_RE = pyglet.resource.image('tile-6s.png')\n BLOCK_PLATFORM_UPRC = pyglet.resource.image('tile-7s.png')\n BLOCK_PLATFORM_FCL = pyglet.resource.image('tile-8s.png')\n BLOCK_PLATFORM_BOTTOM = pyglet.resource.image('tile-9s.png')\n BLOCK_PLATFORM_FCR = pyglet.resource.image('tile-10s.png')\n BLOCK_PLATFORM_UPLC = pyglet.resource.image('tile-11s.png')\n BLOCK_PLATFORM_BLC = pyglet.resource.image('tile-12s.png')\n BLOCK_PLATFORM_BRC = pyglet.resource.image('tile-13s.png')\n BLOCK_PLATFORM_BONES = [pyglet.resource.image('bone-bg-1.png'),\n pyglet.resource.image('bone-bg-2.png'),\n pyglet.resource.image('bone-bg-3.png'),\n pyglet.resource.image('bone-bg-4.png')]\n\n def __init__(self, *args, **kwargs):\n self.space = kwargs['space']\n self.__init_physics(args)\n self.image = self.create_image(args)\n\n def __init_physics(self, args):\n self.body = pymunk.Body(mass=0, moment=0, body_type=Body.KINEMATIC)\n self.shps = MapEntity.get_shapes(self.body, args)\n self.body.player_on_the_platform 
= False\n self.space.add(self.body)\n self.space.add(self.shps)\n origin = MapEntity.get_origin(args)\n self.x, self.y = origin[0] * segment_width, origin[1] * segment_height\n\n @staticmethod\n def get_image(scaled_translated_levels, x, y, background=True):\n n = len(scaled_translated_levels)\n cnt = 0\n for s in scaled_translated_levels:\n cnt += 1\n if cnt == n:\n cnt = 0\n if s[0] < x <= scaled_translated_levels[cnt][0] and scaled_translated_levels[cnt][1] > y and background:\n if random.randrange(1, 100) < 5:\n container_bg_image = pyglet.image.Texture.create(resources.segment_width, resources.segment_height)\n container_bg_image.blit_into(BlockPlatform.BLOCK_PLATFORM_F.get_image_data(), 0, 0, 0)\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n container_bg_image.blit_into(random.choice(BlockPlatform.BLOCK_PLATFORM_BONES).get_image_data(), 0, 0, 0)\n return container_bg_image\n return BlockPlatform.BLOCK_PLATFORM_F\n\n if x == scaled_translated_levels[-1][0] and y == 0:\n return BlockPlatform.BLOCK_PLATFORM_BRC\n elif x == 0 and y == 0:\n return BlockPlatform.BLOCK_PLATFORM_BLC\n elif s[1] - segment_height < y < scaled_translated_levels[cnt][1] - segment_height and x == s[0]:\n return BlockPlatform.BLOCK_PLATFORM_LE\n elif s[1] - segment_height > y > scaled_translated_levels[cnt][1] - segment_height and x == s[0]:\n return BlockPlatform.BLOCK_PLATFORM_RE\n elif y == 0 and x < scaled_translated_levels[-1][0]:\n return BlockPlatform.BLOCK_PLATFORM_BOTTOM\n elif y == s[1] - segment_height:\n if s[0] - segment_width == x and s[1] < scaled_translated_levels[cnt][1]:\n return BlockPlatform.BLOCK_PLATFORM_UPRC\n elif scaled_translated_levels[cnt - 2][0] + segment_width == x and s[1] < \\\n scaled_translated_levels[cnt - 2][1]:\n return BlockPlatform.BLOCK_PLATFORM_UPLC\n elif scaled_translated_levels[cnt - 2][0] == x and s[1] > scaled_translated_levels[cnt - 2][1]:\n return BlockPlatform.BLOCK_PLATFORM_LTOP\n elif s[0] > x > scaled_translated_levels[cnt - 2][0]:\n return BlockPlatform.BLOCK_PLATFORM_TOP\n elif s[0] == x and s[1] > scaled_translated_levels[cnt][1]:\n return BlockPlatform.BLOCK_PLATFORM_RTOP\n elif scaled_translated_levels[cnt - 2][0] == x and s[1] < scaled_translated_levels[cnt - 2][1]:\n return BlockPlatform.BLOCK_PLATFORM_FCR\n elif s[0] == x and s[1] < scaled_translated_levels[cnt][1]:\n return BlockPlatform.BLOCK_PLATFORM_FCL\n\n @staticmethod\n def container_blit(container, scaled_translated_levels, width, height, background):\n for x in range(0, (width + 1) * resources.segment_width, resources.segment_width):\n for y in range(0, (height + 1) * resources.segment_height, resources.segment_height):\n im = BlockPlatform.get_image(scaled_translated_levels, x, y, background=background)\n if im:\n container.blit_into(im.get_image_data(), x, y, 0)\n return container\n\n @staticmethod\n def get_bounding_box(args):\n x_coordinates = []\n y_coordinates = []\n for i in range(len(args)):\n x_coordinates.append(list(args)[i][0])\n y_coordinates.append(list(args)[i][1])\n\n width = max(x_coordinates) - min(x_coordinates)\n height = max(y_coordinates) - min(y_coordinates)\n return width, height\n\n @staticmethod\n def create_image(args):\n width, height = BlockPlatform.get_bounding_box(args)\n\n container_image = pyglet.image.Texture.create((width + 1) * resources.segment_width,\n (height + 1) * resources.segment_height)\n\n scaled_translated_levels = list(MapEntity.scale_block(MapEntity.translate_block(args)))\n\n container_image = 
BlockPlatform.container_blit(container_image, scaled_translated_levels, width, height, True)\n container_image = BlockPlatform.container_blit(container_image, scaled_translated_levels, width, height, False)\n\n return container_image\n","sub_path":"map_objects/block_platform.py","file_name":"block_platform.py","file_ext":"py","file_size_in_byte":6275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"491626777","text":"import re\r\nimport sys\r\nimport argparse\r\nfrom urllib.parse import urlparse\r\n\r\n\r\nurlmap = {}\r\npatterns_seen = []\r\ncontent_patterns = []\r\n\r\nblacklist = r'(post|blog)s?|docs|support/|/(\\d{4}|pages?)/\\d+/'\r\nstatic_exts = ('js', 'css', 'png', 'pdf', 'jpg', 'jpeg', 'ico', 'bmp', 'svg', 'gif','woff','woff2','ttf','otf','mp3','mp4','avi','eot')\r\n\r\n\r\ndef params_to_dict(params: str) -> list:\r\n\t\"\"\"\r\n\tconverts query string to dict\r\n\t\"\"\"\r\n\tthe_dict = {}\r\n\tif params:\r\n\t\tfor pair in params.split('&'):\r\n\t\t\tparts = pair.split('=')\r\n\t\t\ttry:\r\n\t\t\t\tthe_dict[parts[0]] = parts[1]\r\n\t\t\texcept IndexError:\r\n\t\t\t\tpass\r\n\treturn the_dict\r\n\r\n\r\ndef dict_to_params(params: dict) -> str:\r\n\t\"\"\"\r\n\tconverts dict of params to query string\r\n\t\"\"\"\r\n\tstringed = [name + '=' + value for name, value in params.items()]\r\n\treturn '?' + '&'.join(stringed)\r\n\r\n\r\ndef compare_params(og_params: list, new_params: dict) -> bool:\r\n\t\"\"\"\r\n\tchecks if new_params contain a param\r\n\tthat doesn't exist in og_params\r\n\t\"\"\"\r\n\tog_set = set([])\r\n\tfor each in og_params:\r\n\t\tfor key in each.keys():\r\n\t\t\tog_set.add(key)\r\n\treturn set(new_params.keys()) - og_set\r\n\r\n\r\ndef is_seen(path: str) -> bool:\r\n\t\"\"\"\r\n\tchecks if a url matches any recorded patterns\r\n\t\"\"\"\r\n\tfor pattern in patterns_seen:\r\n\t\tif re.search(pattern, path):\r\n\t\t\treturn compare_params(path)\r\n\r\n\r\ndef is_content(path: str) -> bool:\r\n\t\"\"\"\r\n\tchecks if a path is likely to contain\r\n\thuman written content e.g. 
a blog\r\n\t\"\"\"\r\n\tif path.count('-') > 3:\r\n\t\tnew_parts = []\r\n\t\tfor part in re.escape(path).split('/'):\r\n\t\t\tif part.count('-') > 3:\r\n\t\t\t\tnew_parts.append('[^/]+')\r\n\t\t\telse:\r\n\t\t\t\tnew_parts.append(part)\r\n\t\tcontent_patterns.append('/'.join(new_parts))\r\n\t\treturn True\r\n\treturn False\r\n\r\n\r\ndef create_pattern(path: str) -> str:\r\n\t\"\"\"\r\n\tcreates patterns for urls with integers in them\r\n\t\"\"\"\r\n\tnew_parts = []\r\n\tfor part in re.escape(path).split('/'):\r\n\t\tif part.isdigit():\r\n\t\t\tnew_parts.append('\\\\d+')\r\n\t\telse:\r\n\t\t\tnew_parts.append(part)\r\n\treturn '/'.join(new_parts)\r\n\r\n\r\ndef pattern_exists(pattern: str) -> bool:\r\n\t\"\"\"\r\n\tchecks if a int pattern exists\r\n\t\"\"\"\r\n\tfor i, seen_pattern in enumerate(patterns_seen):\r\n\t\tif pattern in seen_pattern:\r\n\t\t\tpatterns_seen[i] = pattern\r\n\t\t\treturn True\r\n\t\telif seen_pattern in pattern:\r\n\t\t\treturn True\r\n\treturn False\r\n\r\n\r\ndef matches_patterns(path: str) -> bool:\r\n\t\"\"\"\r\n\tchecks if the url matches any of the int patterns\r\n\t\"\"\"\r\n\tfor pattern in patterns_seen:\r\n\t\tif re.search(pattern, path):\r\n\t\t\treturn True\r\n\treturn False\r\n\r\n\r\ndef is_blacklisted(path: str) -> bool:\r\n\t\"\"\"\r\n\tchecks if the url matches the blacklist regex\r\n\t\"\"\"\r\n\treturn re.search(blacklist, path)\r\n\r\n\r\ndef has_bad_ext(path: str) -> bool:\r\n\t\"\"\"\r\n\tchecks if a url has a blacklisted extension\r\n\t\"\"\"\r\n\treturn False if '/' in path.split('.')[-1] else path.lower().endswith(static_exts)\r\n\r\ndef main():\r\n\tif not sys.stdin.isatty():\r\n\t\tfor line in sys.stdin:\r\n\t\t\tparsed = urlparse(line.strip())\r\n\t\t\thost = parsed.scheme + '://' + parsed.netloc\r\n\t\t\tpath, params = parsed.path, params_to_dict(parsed.query)\r\n\t\t\tif host not in urlmap:\r\n\t\t\t\turlmap[host] = {}\r\n\t\t\tif has_bad_ext(path):\r\n\t\t\t\tcontinue\r\n\t\t\tif not params:\r\n\t\t\t\tif is_content(path) or is_blacklisted(path):\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tpattern = create_pattern(path)\r\n\t\t\t\tif matches_patterns(path):\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif '\\\\d+' in pattern and not pattern_exists(pattern):\r\n\t\t\t\t\tpatterns_seen.append(pattern)\r\n\t\t\tif path not in urlmap[host]:\r\n\t\t\t\turlmap[host][path] = [params] if params else []\r\n\t\t\telif params and compare_params(urlmap[host][path], params):\r\n\t\t\t\turlmap[host][path].append(params)\r\n\tfor host, value in urlmap.items():\r\n\t\tfor path, params in value.items():\r\n\t\t\tif params:\r\n\t\t\t\tfor param in params:\r\n\t\t\t\t\tprint(host + path + dict_to_params(param))\r\n\t\t\telif '-' in path:\r\n\t\t\t\tmatched = False\r\n\t\t\t\tfor pattern in content_patterns:\r\n\t\t\t\t\tif re.search(pattern, path):\r\n\t\t\t\t\t\tmatched = True\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tif not matched:\r\n\t\t\t\t\tprint(host + path)\r\n\t\t\telse:\r\n\t\t\t\tprint(host + path)\r\n","sub_path":"uro/uro.py","file_name":"uro.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"216322284","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models\nfrom odoo.exceptions import UserError\n\nclass WorkorderRecicle(models.TransientModel):\n _name = \"mrp.production.transfer\"\n _description = \"Lanza una ventana de transferencia\"\n\n location_id = fields.Many2one('stock.location','Ubicacion origen',required='True')\n location_dest_id = 
fields.Many2one('stock.location','Ubicacion destino',required='True')\n    origin = fields.Char('Documento Origen')\n    product_id = fields.Many2one(\n        'product.product', 'Producto',\n        required=True)\n    qty = fields.Float(string='Cantidad')\n    product_uom_id = fields.Many2one('product.uom', 'Unidad de medida', required=True)\n    transfer_type = fields.Selection([\n        ('abastecimiento','Abastecimiento'),\n        ('aumento','Falta de material'),\n        ('devolucion','Devolucion'),\n        ],string='Motivo de transferencia',default='abastecimiento',required='True')\n\n    # @api.onchange('product_id')\n    # def onchange_product_id(self):\n    #     if self.product_id:\n    #         self.product_uom_id = self.product_id.uom_id.id\n    #         active_id = self.env.context.get('active_id', False)\n    #         production = self.env[self.env.context.get('active_model')].search([('id', '=', active_id)])\n    #         move = self._get_move(production,'confirmed')\n    #         if move:\n    #             self.qty = move.product_uom_qty\n    #         else:\n    #             self.qty = 0\n\n\n    @api.multi\n    def button_transfer(self):\n        self.ensure_one()\n        active_id = self.env.context.get('active_id', False)\n        order = self.env[self.env.context.get('active_model')].search([('id', '=', active_id)])\n        production = order.raw_material_production_id\n        if(self.transfer_type=='abastecimiento'):\n            #if(self._get_move(production,['confirmed'])):\n            self._done_transfer()\n            production.action_assign()\n            #else: raise UserError('Cannot supply an order that is in progress')\n        elif(self.transfer_type=='aumento'):\n            # get the stock move that will be modified\n            move_affected = self._get_move(production,['assigned'])\n            move_affected.write({\n                'product_uom_qty':move_affected.product_uom_qty+self.qty,\n                'quantity_done_store':move_affected.quantity_done_store+self.qty,\n            })\n            # transfer from the warehouse\n            self._done_transfer()\n            # reserve the transferred quantity on the mrp.production move\n            quants = self.env['stock.quant'].quants_get_preferred_domain(\n                self.qty, move_affected)\n            self.env['stock.quant'].quants_reserve(quants, move_affected)\n        elif(self.transfer_type=='devolucion'):\n            move_affected = self._get_move(production, ['assigned'])\n            move_affected.do_unreserve()\n            self._done_transfer()\n            move_affected.write({\n                'product_uom_qty': move_affected.product_uom_qty - self.qty,\n                'quantity_done_store': move_affected.quantity_done_store - self.qty,\n            })\n            quants = self.env['stock.quant'].quants_get_preferred_domain(\n                move_affected.product_uom_qty, move_affected)\n            self.env['stock.quant'].quants_reserve(quants, move_affected)\n\n    def _prepare_move_values(self):\n        self.ensure_one()\n        return {\n            'name': self.origin,\n            'origin': self.origin,\n            'product_id': self.product_id.id,\n            'product_uom': self.product_uom_id.id,\n            'product_uom_qty': self.qty,\n            'location_id': self.location_id.id,\n            'location_dest_id': self.location_dest_id.id\n        }\n    def _get_move(self,production,estado):\n        move = production.move_raw_ids.filtered(lambda x: x.product_id.id == self.product_id.id and x.state in estado)\n        return move\n\n    def _done_transfer(self):\n        self.ensure_one()\n        move = self.env['stock.move'].create(self._prepare_move_values())\n        quants = self.env['stock.quant'].quants_get_preferred_domain(\n            move.product_qty, move)\n        self.env['stock.quant'].quants_reserve(quants, move)\n        move.action_done()\n","sub_path":"wizard/mrp_transferencia.py","file_name":"mrp_transferencia.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"22643389","text":"import logging\nimport random\n\nfrom 
.batch_filter import BatchFilter\nfrom gunpowder.coordinate import Coordinate\n\nlogger = logging.getLogger(__name__)\n\nclass SimpleAugment(BatchFilter):\n\n def __init__(self, transpose_only_xy=True):\n self.transpose_only_xy = transpose_only_xy\n\n def prepare(self, request):\n\n self.total_roi = request.get_total_roi()\n self.dims = self.total_roi.dims()\n\n self.mirror = [ random.randint(0,1) for d in range(self.dims) ]\n if self.transpose_only_xy:\n assert self.dims==3, \"Option transpose_only_xy only makes sense on 3D batches\"\n t = [1,2]\n random.shuffle(t)\n self.transpose = (0,) + tuple(t)\n else:\n t = list(range(self.dims))\n random.shuffle(t)\n self.transpose = tuple(t)\n\n logger.debug(\"mirror = \" + str(self.mirror))\n logger.debug(\"transpose = \" + str(self.transpose))\n\n reverse_transpose = [0]*self.dims\n for d in range(self.dims):\n reverse_transpose[self.transpose[d]] = d\n\n logger.debug(\"downstream request = \" + str(request))\n\n self.__transpose_request(request, reverse_transpose)\n self.__mirror_request(request, self.mirror)\n\n logger.debug(\"upstream request = \" + str(request))\n\n def process(self, batch, request):\n\n mirror = tuple(\n slice(None, None, -1 if m else 1)\n for m in self.mirror\n )\n\n for (volume_type, volume) in batch.volumes.items():\n\n volume.data = volume.data[mirror]\n if self.transpose != (0,1,2):\n volume.data = volume.data.transpose(self.transpose)\n\n logger.debug(\"total ROI: %s\"%self.total_roi)\n logger.debug(\"upstream %s ROI: %s\"%(volume_type,volume.roi))\n self.__mirror_roi(volume.roi, self.total_roi, self.mirror)\n logger.debug(\"mirrored %s ROI: %s\"%(volume_type,volume.roi))\n self.__transpose_roi(volume.roi, self.transpose)\n logger.debug(\"transposed %s ROI: %s\"%(volume_type,volume.roi))\n\n def __mirror_request(self, request, mirror):\n\n for (volume_type, roi) in request.volumes.items():\n self.__mirror_roi(roi, self.total_roi, mirror)\n\n def __transpose_request(self, request, transpose):\n\n for (volume_type, roi) in request.volumes.items():\n self.__transpose_roi(roi, transpose)\n\n def __mirror_roi(self, roi, total_roi, mirror):\n\n total_roi_offset = total_roi.get_offset()\n total_roi_shape = total_roi.get_shape()\n\n roi_offset = roi.get_offset()\n roi_shape = roi.get_shape()\n\n roi_in_total_offset = roi_offset - total_roi_offset\n end_of_roi_in_total = roi_in_total_offset + roi_shape\n roi_in_total_offset_mirrored = total_roi_shape - end_of_roi_in_total\n roi_offset = Coordinate(\n total_roi_offset[d] + roi_in_total_offset_mirrored[d] if mirror[d] else roi_offset[d]\n for d in range(self.dims)\n )\n\n roi.set_offset(roi_offset)\n\n def __transpose_roi(self, roi, transpose):\n\n offset = roi.get_offset()\n shape = roi.get_shape()\n offset = tuple(offset[transpose[d]] for d in range(self.dims))\n shape = tuple(shape[transpose[d]] for d in range(self.dims))\n roi.set_offset(offset)\n roi.set_shape(shape)\n","sub_path":"gunpowder/nodes/simple_augment.py","file_name":"simple_augment.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"558531227","text":"# coding: utf-8\nfrom zope.component.hooks import setSite\nfrom Products.CMFCore.utils import getToolByName\nfrom collective.elasticsearch import hook\nfrom collective.elasticsearch.es import ElasticSearchCatalog\nfrom collective.elasticsearch.interfaces import IElasticSettings\nfrom collective.elasticsearch.testing import ElasticSearch_FUNCTIONAL_TESTING\nfrom 
collective.elasticsearch.testing import ElasticSearch_INTEGRATION_TESTING\nfrom plone.registry.interfaces import IRegistry\nimport transaction\nimport unittest2 as unittest\nfrom zope.component import getUtility\n\n\nclass BaseTest(unittest.TestCase):\n\n layer = ElasticSearch_INTEGRATION_TESTING\n\n def setUp(self):\n super(BaseTest, self).setUp()\n self.portal = self.layer['portal']\n self.request = self.layer['request']\n self.request.environ['testing'] = True\n self.app = self.layer['app']\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IElasticSettings)\n settings.enabled = True\n\n self.catalog = getToolByName(self.portal, 'portal_catalog')\n self.catalog._elasticcustomindex = 'plone-test-index'\n self.es = ElasticSearchCatalog(self.catalog)\n self.es.convertToElastic()\n self.catalog.manage_catalogRebuild()\n # need to commit here so all tests start with a baseline\n # of elastic enabled\n self.commit()\n\n def commit(self):\n transaction.commit()\n # for some reason, commit() resets the site\n setSite(self.portal)\n\n def clearTransactionEntries(self):\n _hook = hook.getHook(self.es)\n _hook.remove = []\n _hook.index = {}\n\n def tearDown(self):\n super(BaseTest, self).tearDown()\n self.es.connection.indices.delete(index=self.es.index_name)\n self.clearTransactionEntries()\n\n\nclass BaseFunctionalTest(BaseTest):\n\n layer = ElasticSearch_FUNCTIONAL_TESTING","sub_path":"collective/elasticsearch/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"442395360","text":"s=list(input())\nt=list(input())\nindic=[]\nothers=[]\nres=[]\ndic=dict()\n\ndef get_key (dict, value):\n for k, v in dict.items():\n if v == value:\n return k\n\n\nfor i in range(len(s)):\n dic.update({s[i]:i})\nfor i in range(len(t)):\n if t[i] not in s:\n others.append(t[i])\n else:\n indic.append(dic[t[i]])\nindic.sort()\nfor i in range(len(indic)):\n res.append(get_key(dic,indic[i]))\nprint(''.join(res))","sub_path":"Code/CodeRecords/2530/60785/314894.py","file_name":"314894.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"118144753","text":"from django.conf.urls import url\nfrom .views import PacienteHistoriaClinicaIngreso,PacienteHistoriaClinicaIngresoUpdate, PacienteGetHistoriaClinicaIngreso \n\n\nurlpatterns = [\n url(r'^editar_historia_clinica_ingreso/(?P\\d+)/$',PacienteHistoriaClinicaIngresoUpdate.as_view(), name='editar_historia_clinica_ingreso'),\n url(r'^paciente_historia_ingreso/$', PacienteGetHistoriaClinicaIngreso, name='paciente_historia_ingreso'), \n url(r'^historia_clinica_ingreso/$', PacienteHistoriaClinicaIngreso.as_view(), name='admision_paciente'),\n]\n\n","sub_path":"src/apps/historia_ingreso_homecare/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"619028161","text":"import scrapy\nimport json\nimport codecs\nimport urllib\n\nclass OpenTendersSpider(scrapy.Spider):\n name = \"tenders\"\n main_fields_map = {\n # id info\n 0: 'date',\n 1: 'number',\n 2: 'status',\n\n # common info\n 3: 'field',\n 4: 'item_description',\n\n # creator info\n 5: 'who_buys',\n 6: 'buyer_info',\n 7: 'buyer_contact',\n\n 8: 'creation_date',\n 9: 'end_date',\n 10: 'approximate_sum',\n 11: 'qualification_requirement',\n 12: 
'time_place_show_documents',\n 13: 'document_prices',\n 14: 'proposition_place_terms',\n 15: 'other_data',\n\n # 16: 'lots',\n\n # 17: 'files',\n\n # 18: 'history'\n }\n\n lots_fields_map = {\n 0: 'lot_id',\n 1: 'lot_status',\n 2: 'lot_name',\n 3: 'lot_quantity',\n 4: 'lot_measure_unit',\n 5: 'lot_order_price',\n 6: 'lot_currency',\n 7: 'lot_full_price_usd',\n }\n\n history_fields_map = {\n 0: 'history_time',\n 1: 'history_event'\n }\n\n def start_requests(self):\n limit = 100000\n url = 'http://opentenders.by/tenders/current/'\n # yield scrapy.Request(url + str(50), callback=self.parse)\n for num in xrange(limit):\n response = scrapy.Request(url = url + str(num), callback=self.parse)\n self.state['items_count'] = self.state.get('items_count', 0) + 1\n yield response\n\n def parse(self, response):\n tender = {}\n root = response.css('div.tenders-view')\n basic_info = root.css('.info-body tr')\n for i in xrange(len(self.main_fields_map)):\n tender[self.main_fields_map[i]] = basic_info[i].css('td::text').extract_first()\n \n # lots info\n tender['lots'] = []\n lots = root.css('.detail_lots tbody tr')\n for lot in lots:\n lot_item = {}\n lot_columns = lot.css('td a::text')\n for i in xrange(len(self.lots_fields_map)):\n lot_item[self.lots_fields_map[i]] = lot_columns[i].extract()\n\n tender['lots'].append(lot_item)\n\n # files info\n tender['files'] = []\n files = root.css('.detail_files tbody tr')\n for file in files:\n file_item = {}\n file_item['link']= file.css('td a::attr(href)').extract_first()\n file_item['name'] = file.css('td a::text').extract_first()\n tender['files'].append(file_item)\n\n # history info\n tender['history'] = []\n events = root.css('.detail_history tbody tr')\n for event in events:\n event_item = {}\n event_columns = event.css('td')\n event_item['time']= event_columns[0].css('::text').extract()\n if len(event_columns[1].css('a')) == 0:\n event_item['name'] = event_columns[1].css('::text').extract()\n else:\n event_item['name'] = event_columns[1].css('a::text').extract_first()\n event_item['link'] = event_columns[1].css('a::attr(href)').extract_first() \n\n tender['history'].append(event_item)\n\n # print(json.dumps(tender))\n # with codecs.open('_{}'.format(filename), 'w', encoding=\"utf-8\") as outfile:\n # out_file = codecs.open(\"out_{}.json\".format(, \"w\", encoding=\"utf-8\")\n # json.dump(tender, out_file, ensure_ascii=False, )\n # out_file.close()\n\n yield tender","sub_path":"tutorial/tutorial/spiders/opentenders.py","file_name":"opentenders.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"293334303","text":"import glob\nimport json\nimport logging\nimport os\n\nimport cv2\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\n\nlogfilepath = \"\" # 따로 지정하지 않으면 terminal에 뜸\nif os.path.isfile(logfilepath):\n os.remove(logfilepath)\nlogging.basicConfig(filename=logfilepath, level=logging.INFO)\n\n\nclass DetectionDataset(Dataset):\n \"\"\"\n Parameters\n ----------\n path : str(jpg)\n Path to input image directory.\n transform : object\n \"\"\"\n CLASSES = ['smoke']\n\n def __init__(self, path='Dataset/train', transform=None, sequence_number=1):\n super(DetectionDataset, self).__init__()\n if sequence_number < 1 and isinstance(sequence_number, float):\n logging.error(f\"{sequence_number} Must be greater than 0\")\n return\n\n self._name = os.path.basename(path)\n self._sequence_number = sequence_number\n self._camera_list = 
glob.glob(os.path.join(path, \"images\", \"*\"))\n self._transform = transform\n self._items = []\n self._itemname = []\n self._make_item_list()\n\n def key_func(self, path):\n\n base_path = os.path.basename(path)\n except_format = os.path.splitext(base_path)[0]\n split_path = except_format.split(\"_\")\n number = int(split_path[-1])\n return number\n\n def _make_item_list(self):\n if self._camera_list:\n for camera_list in self._camera_list:\n for camera in glob.glob(os.path.join(camera_list, \"*\")):\n image_path_list = sorted(glob.glob(os.path.join(camera, \"*.jpg\")), key=lambda path: self.key_func(path))\n for i in range(len(image_path_list) - (self._sequence_number - 1)):\n image_path = image_path_list[i:i + self._sequence_number]\n label_path = image_path[-1].replace(\"images\", \"labels\").replace(\".jpg\", \".json\")\n self._items.append((image_path, label_path))\n # base_image = os.path.basename(image_path[-1])\n # name = os.path.splitext(base_image)[0]\n # self._itemname.append(name)\n self._itemname.append(image_path[-1])\n else:\n logging.info(\"The dataset does not exist\")\n\n def __getitem__(self, idx):\n\n images = []\n image_sequence_path, label_path = self._items[idx]\n for image_path in image_sequence_path:\n image = cv2.imread(image_path, flags=-1)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n images.append(image)\n images = np.concatenate(images, axis=-1)\n\n origin_images = images.copy()\n label = self._parsing(label_path) # dtype을 float 으로 해야 아래 단계에서 편하다\n origin_label = label.copy()\n\n if self._transform:\n result = self._transform(images, label, self._itemname[idx])\n if len(result) == 3:\n return result[0], result[1], result[2], torch.as_tensor(origin_images), torch.as_tensor(origin_label)\n else:\n return result[0], result[1], result[2], result[3], result[4], result[5], result[\n 6]\n else:\n return images, label, self._itemname[idx]\n\n def _parsing(self, path):\n json_list = []\n # json파일 parsing - 순서 -> topleft_x, topleft_y, bottomright_x, bottomright_y, center_x, center_y\n try:\n with open(path, mode='r') as json_file:\n dict = json.load(json_file)\n for i in range(len(dict[\"landmarkAttr\"])):\n if \"attributes\" in list(dict[\"landmarkAttr\"][i].keys()):\n xmin = int(dict[\"landmarkAttr\"][i][\"box\"][0]['x'])\n ymin = int(dict[\"landmarkAttr\"][i][\"box\"][0]['y'])\n xmax = int(dict[\"landmarkAttr\"][i][\"box\"][1]['x'])\n ymax = int(dict[\"landmarkAttr\"][i][\"box\"][1]['y'])\n category_id = dict[\"landmarkAttr\"][i][\"attributes\"][0]['selected']\n\n if isinstance(category_id, (list, tuple)):\n category_id = category_id[0]\n\n if category_id == \"0\":\n classes = 0\n # elif category_id == \"1\":\n # classes = 1\n elif category_id == 0:\n classes = 0\n # elif category_id == 1:\n # classes = 1\n elif category_id == \"smoke\":\n classes = 0\n # elif category_id == \"smoke\":\n # classes = 1\n else:\n xmin, ymin, xmax, ymax, classes = -1, -1, -1, -1, -1\n json_list.append((xmin, ymin, xmax, ymax, classes))\n else:\n print(f\"only image : {path}\")\n json_list.append((-1, -1, -1, -1, -1))\n except Exception:\n # print(f\"only image or json crash : {path}\")\n json_list.append((-1, -1, -1, -1, -1))\n return np.array(json_list, dtype=\"float32\") # 반드시 numpy여야함.\n else:\n return np.array(json_list, dtype=\"float32\") # 반드시 numpy여야함.\n\n @property\n def classes(self):\n return self.CLASSES\n\n @property\n def num_class(self):\n \"\"\"Number of categories.\"\"\"\n return len(self.CLASSES)\n\n def __str__(self):\n return self._name + \" \" + 
\"dataset\"\n\n def __len__(self):\n return len(self._items)\n\n\n# test\nif __name__ == \"__main__\":\n import random\n from core.utils.util.utils import plot_bbox\n\n sequence_number = 3\n root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\n dataset = DetectionDataset(path=os.path.join(root, 'valid'), sequence_number=sequence_number)\n\n length = len(dataset)\n sequence_image, label, file_name = dataset[random.randint(0, length - 1)]\n print('images length:', length)\n print('sequence image shape:', sequence_image.shape)\n\n if sequence_number > 1:\n sequence_image = sequence_image[:,:,3*(sequence_number-1):]\n file_name = file_name[-1]\n\n plot_bbox(sequence_image, label[:, :4],\n scores=None, labels=label[:, 4:5],\n class_names=dataset.classes, colors=None, reverse_rgb=True, absolute_coordinates=True,\n image_show=True, image_save=False, image_save_path=\"result\", image_name=os.path.basename(file_name))\n '''\n images length: 1499\n sequence image shape: (720, 1280, 9)\n '''\n","sub_path":"CenterNet/core/utils/dataprocessing/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":6760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"434448001","text":"def findRestaurant(list1, list2):\n fav = { r:i for i,r in enumerate(list1) }\n res = []\n min_ix = float('inf')\n\n for i, r in enumerate(list2):\n if r in fav:\n if i + fav[r] == min_ix:\n res.append(r)\n\n elif i + fav[r] < min_ix:\n res = [r]\n min_ix = i + fav[r]\n\n return res\n\nprint(findRestaurant([\"Shogun\", \"Tapioca Express\", \"Burger King\", \"KFC\"],\n [\"Piatti\", \"The Grill at Torrey Pines\", \"Hungry Hunter Steakhouse\", \"Shogun\"]))\n","sub_path":"interviews/python/LC599_minimum_index_sum_of_two_lists.py","file_name":"LC599_minimum_index_sum_of_two_lists.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"623495298","text":"from glob import glob\nfrom os.path import join\nimport numpy as np\nfrom decim import glaze2 as gl\nimport pandas as pd\n\n\ndef load_logs_bids(subject, session, base_path, run='inference'):\n '''\n Returns filenames and pandas frame.\n '''\n if session == 'ses-1':\n modality = 'beh'\n else:\n modality = 'func'\n directory = join(base_path,\n subject,\n session,\n modality)\n if run == 'instructed':\n files = sorted(glob(join(directory, '*{}*.csv'.format(run))))\n else:\n files = sorted(glob(join(directory, '*{}*.tsv'.format(run))))\n if len(files) == 0:\n raise RuntimeError(\n 'No log file found for this block: %s, %s' %\n (subject, session))\n if run == 'inference':\n return {file[-26:-11]: pd.read_table(file) for file in files}\n elif run == 'instructed':\n return {file[-27:-11]: pd.read_csv(file) for file in files}\n\n\ndef stan_data_control(subject, session, path, swap=False):\n '''\n Returns dictionary with data that fits requirement of stan model.\n\n Takes subject, session, phase, list of blocks and filepath.\n '''\n lp = [0]\n logs = load_logs_bids(subject, session, path)[1]\n df = pd.concat(logs)\n lp = [0]\n for i in range(len(logs)):\n d = logs[i]\n block_points = np.array(d.loc[d.event == 'GL_TRIAL_LOCATION',\n 'value'].index).astype(int)\n lp.append(len(block_points))\n df = df.loc[df.event != '[0]']\n df = df.loc[df.event != 'BUTTON_PRESS'] # sometimes duplicates\n df.index = np.arange(len(df))\n points = df.loc[df.event == 
'GL_TRIAL_LOCATION']['value'].astype(float)\n point_count = len(points)\n decisions = df.loc[df.event == 'CHOICE_TRIAL_RULE_RESP',\n 'value'].astype(float)\n if swap is True:\n decisions = decisions\n else:\n decisions = -(decisions[~np.isnan(decisions)].astype(int)) + 1\n dec_count = len(decisions)\n\n decisions = decisions.dropna()\n belief_indices = df.loc[decisions.index].index.values\n pointinds = np.array(points.index)\n dec_indices = np.searchsorted(pointinds, belief_indices) # np.searchsorted looks for position where belief index would fit into pointinds\n data = {\n 'I': dec_count,\n 'N': point_count,\n 'obs_decisions': decisions.values,\n 'x': points.values,\n 'obs_idx': dec_indices,\n 'B': len(logs),\n 'b': np.cumsum(lp)\n }\n\n return data\n\n\ndef performance_control(subject, session, base_path):\n '''\n Returns performance and no_answer percentage.\n '''\n logs = load_logs_bids(subject, session, base_path)[1]\n df = pd.concat(logs)\n df = df.loc[df.event != '[0]']\n df = df.loc[df.event != '0']\n df = df.loc[df.event != 'BUTTON_PRESS']\n df.index = np.arange(len(df))\n rews = (df.loc[df.event == \"GL_TRIAL_REWARD\", 'value'])\n array = np.array(rews.values).astype(float)\n no_answer = np.count_nonzero(np.isnan(array))\n rresp = df.loc[df.event == 'CHOICE_TRIAL_RULE_RESP', 'value']\n rewards_manually = np.array(rresp).astype(float) +\\\n np.array(df.loc[rresp.index - 6, 'value']).astype(float)\n performance = np.sum(rewards_manually == 0.5) / len(rewards_manually)\n return performance, no_answer\n\n\ndef mean_rt(subject, session, base_path):\n '''\n Returns mean reaction time of given block.\n '''\n logs = load_logs_bids(subject, session, base_path)[1]\n df = pd.concat(logs)\n rt = df.loc[df.message == 'CHOICE_TRIAL_RT']['value'].astype(float)\n return rt.mean()\n\n\ndef accev(subject, session, base_path, H):\n '''\n returns accumulated evidence and rt at decision points.\n '''\n logs = load_logs_bids(subject, session, base_path)[1]\n df = pd.concat(logs)\n df = df.loc[df.event != '[0]']\n df = df.loc[df.event != '0']\n df = df.loc[df.event != 'BUTTON_PRESS']\n df.index = np.arange(len(df))\n choices = (df.loc[df.event == \"CHOICE_TRIAL_RULE_RESP\", 'value']\n .astype(float))\n belief_indices = df.loc[choices.index - 11].index.values\n rt = df.loc[df.event == 'CHOICE_TRIAL_RT']['value']\n accum_ev = gl.belief(df, H).loc[belief_indices].values\n return pd.DataFrame({'reaction time': rt,\n 'accumulated evidence': accum_ev})\n","sub_path":"decim/glaze_control.py","file_name":"glaze_control.py","file_ext":"py","file_size_in_byte":4377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"343060513","text":"# -*- coding: utf-8 -*-\n\"\"\"This file is a TTPConsulting spider created on top of the ATSSpider\nscrapy crawl ttpconsulting -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.ttpconsulting.com.hk/home/job_search/0/0/0/0\"\n\nsample url:\n http://www.ttpconsulting.com.hk/home/job_search/0/0/0/0\n\"\"\"\n\nfrom urlparse import urljoin\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, Replace\n\n\nclass TTPConsulting(ATSSpider):\n\n name = 'ttpconsulting'\n ref_re = compile(r\"(\\d+)$\")\n count_re = compile(r\"Total : (\\d+)\")\n\n def parse(self, response):\n sel = Selector(response)\n if not self.expected_job_count_set:\n 
count = sel.xpath(\n                '//div[@class=\"page\"]/text()[1]').re(self.count_re)\n            if count:\n                self.expected_job_count = int(count[0])  # .re() returns a list of strings\n\n        jobs = sel.xpath('//div[@class=\"search_list\"]/dl')\n        for job in jobs:\n            job_url = job.xpath('./dd/a/@href').extract()\n            if job_url:\n                meta = {\n                    'loc': job.xpath(\n                        './dd[@class=\"search_dd1\"][1]/text()'\n                    ).extract(),\n                    'title': job.xpath('./dt[1]/text()').extract(),\n                }\n                yield Request(\n                    job_url[0], callback=self.parse_job_callback(), meta=meta\n                )\n\n        next_url = sel.xpath('//a[text()=\" Next \"]/@href').extract()\n        if next_url:\n            yield Request(next_url[0], callback=self.parse)\n\n    def parse_job(self, response):\n        loader = BrightcorpItemLoader(response=response)\n        loader.add_value('url', response.url)\n        loader.add_value('title', response.meta.get('title'))\n        loader.add_value(\n            'location', response.meta.get('loc'), Replace('Location:')\n        )\n        loader.add_value(\n            'referencenumber', response.url,\n            Prefix('%s-' % self.name), re=self.ref_re\n        )\n        loader.add_xpath('description', '//div[@class=\"job_info\"]')\n        yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/ttpconsulting.py","file_name":"ttpconsulting.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"374596417","text":"from inventory.models import Item, Inventory, ItemType\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.db.models import Sum\n\ndef index(request):\n    inventory = {}\n    item_list = Item.objects.all()\n    item_types = ItemType.objects.all()\n    \n    tempDict = {}\n    \n    for itype in item_types:  # renamed from 'type' to avoid shadowing the built-in\n        for itemSpecific in item_list:\n            if itype.item_type == str(itemSpecific.category):\n                tempDict[itemSpecific] = Inventory.objects.filter(item=itemSpecific).aggregate(total=Sum(\"quantity\"))[\"total\"] \n        \n        inventory[itype] = tempDict.copy()\n        tempDict.clear()\n    \n    return render_to_response('inventory/index.html', { 'item_list': item_list,\n                                                        'inventory': inventory,\n                                                        }, RequestContext(request))\n    \ndef detail(request, item):\n    item_detail = Inventory.objects.filter(item__id=item)\n    item_title = str(Item.objects.get(id=item))\n    total = Inventory.objects.filter(item__id=item).aggregate(total=Sum(\"quantity\"))[\"total\"]\n\n    \n    return render_to_response('inventory/detail.html', {'item_detail': item_detail,\n                                                        'item_title': item_title,\n                                                        'total': total,\n                                                        }, RequestContext(request))\n","sub_path":"inventory/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"14289090","text":"import util\nfrom models import StarLog\n\n\ndef jump(session, fleet, inputs, outputs):\n    if len(inputs) == 0:\n        raise Exception('jump must contain at least one input')\n    if len(outputs) == 0:\n        raise Exception('jump must contain at least one output')\n    input_ship_count = 0\n    origin_system_id = inputs[0].star_system_id\n    for current_input in inputs:\n        if current_input.fleet_id != fleet.id:\n            raise Exception('jump must consist of ships from a single fleet')\n        if current_input.star_system_id != origin_system_id:\n            raise Exception('jump inputs must start from the same origin')\n        input_ship_count += current_input.count\n    output_ship_count = 0\n    destination_system_id = None\n    for current_output in outputs:\n        if 
current_output.star_system_id != origin_system_id:\n destination_system_id = current_output.star_system_id\n break\n if destination_system_id is None:\n raise Exception('jump must consist of at least one output in another system')\n for current_output in outputs:\n if current_output.fleet_id != fleet.id:\n raise Exception('jump must consist of ships from a single fleet')\n if current_output.star_system_id == origin_system_id:\n input_ship_count -= current_output.count\n elif current_output.star_system_id == destination_system_id:\n output_ship_count += current_output.count\n else:\n raise Exception('jump outputs must end in the same origin or destination')\n origin_system = session.query(StarLog).filter_by(id=origin_system_id).first()\n destination_system = session.query(StarLog).filter_by(id=destination_system_id).first()\n ship_cost = util.get_jump_cost(origin_system.hash, destination_system.hash, input_ship_count)\n if ship_cost == input_ship_count:\n raise Exception('jump cannot have zero ships reach destination')\n if ship_cost != (input_ship_count - output_ship_count):\n raise Exception('jump cost does not match expected cost of %s' % ship_cost)\n\n\ndef attack(fleet, inputs, outputs):\n if len(inputs) < 2:\n raise Exception('jump must contain at least two inputs')\n \n ship_count = 0\n enemy_ship_count = 0\n origin_system_id = inputs[0].star_system_id\n enemy_fleet_id = None\n for current_input in inputs:\n if current_input.star_system_id != origin_system_id:\n raise Exception('attack inputs must be from the same origin')\n if current_input.fleet_id == fleet.id:\n ship_count += current_input.count\n else:\n if enemy_fleet_id is None:\n enemy_fleet_id = current_input.fleet_id\n elif enemy_fleet_id != current_input.fleet_id:\n raise Exception('an attack may only consist of two fleets')\n enemy_ship_count += current_input.count\n \n output_ship_count = 0\n output_enemy_ship_count = 0\n for current_output in outputs:\n if current_output.count == 0:\n raise Exception('attack output cannot be zero')\n if current_output.star_system_id != origin_system_id:\n raise Exception('attack outputs must be in the same origin')\n if current_output.fleet_id == fleet.id:\n output_ship_count += current_output.count\n elif current_output.fleet_id == enemy_fleet_id:\n output_enemy_ship_count += current_output.count\n else:\n raise Exception('an attack output must be from the original fleets')\n \n if ship_count < enemy_ship_count:\n if enemy_ship_count - ship_count != output_enemy_ship_count:\n raise Exception('attack input and output count mismatch')\n elif enemy_ship_count < ship_count:\n if ship_count - enemy_ship_count != output_ship_count:\n raise Exception('attack input and output count mismatch')\n elif output_ship_count + output_enemy_ship_count != 0:\n raise Exception('attack input and output count mismatch')\n","sub_path":"verify.py","file_name":"verify.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"219498261","text":"from datetime import datetime\nimport pytest\nfrom ...examples.qt_search import Searches\n\n\n@pytest.fixture(scope=\"function\")\ndef make_test_searches(qtbot, request):\n searchess = []\n\n def actual_factory(*model_args, **model_kwargs):\n model_kwargs[\"show\"] = model_kwargs.pop(\n \"show\", request.config.getoption(\"--show-window\")\n )\n searches = Searches(*model_args, **model_kwargs)\n searchess.append(searches)\n return searches\n\n yield actual_factory\n\n for searches 
in searchess:\n searches.close()\n\n\ndef test_searches(make_test_searches):\n make_test_searches()\n\n\ndef test_manipulating_times(make_test_searches):\n searches = make_test_searches()\n searches[0].input.since = 0\n searches[0].input.since = datetime(1985, 11, 15)\n searches[0].input.until = datetime(1985, 11, 15)\n","sub_path":"bluesky_widgets/_qt/tests/test_qt_search.py","file_name":"test_qt_search.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"471757327","text":"#!/usr/bin/env python3\n\nfrom terminaltables import AsciiTable\nfrom bs4 import BeautifulSoup as BS\nfrom urllib.parse import urlencode\nfrom functools import wraps\nfrom gevent import monkey\nimport gevent\nmonkey.patch_all()\nimport requests\n\nparam = {\n 'key': 'AIzaSyCVAXiUzRYsML1Pv6RwSG1gunmMikTzQqY',\n 'cse_tok':'ABPF6HiCHIVJrxlXCDeQ_MhDTTgcCDw83g:1529916884758',\n 'sig': '4aa0772189af4c17ea7ec181af2bca15',\n 'cx': '008614474937839428461:i55eqojmyye',\n 'rsz': 'filtered_cse',\n 'num': '20',\n 'hl': 'ko',\n 'prettyPrint': 'true',\n 'source': 'gcsc',\n 'gss': '.com',\n 'q': '',\n}\n\nheaders = {'User-Agent': \n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3)\"\\\n \"AppleWebKit/537.36 (KHTML, like Gecko) \"\\\n \"Chrome/66.0.3359.181 Safari/537.36\"}\n\n\ndef debug(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n print('[*]Running from {} ....'.format(func.__name__))\n return func(*args, **kwargs)\n return wrapper\n\n\n@debug\ndef get_magnet(bbs_url):\n soup = BS(requests.get(bbs_url, headers=headers).content, 'lxml')\n for s in soup.find_all('a', href=True):\n text = s['href']\n if text.startswith('magnet'):\n return text\n\n\n@debug\ndef print_torrent_table():\n keyword = input('input the keyword: ')\n param['q'] = keyword\n all_data = []\n bbs = []\n base_url = 'https://www.googleapis.com/customsearch/v1element'\n url = '{}?{}'.format(base_url, urlencode(param))\n r = requests.get(url, headers=headers)\n results = r.json()['results']\n for n, result in enumerate(results, 1):\n mdata = result['richSnippet']['metatags']\n description = mdata['ogDescription'].split('용량:')\n title = mdata['ogTitle'][:40]\n file_size = description[-1].strip()\n bbs_link = mdata['ogUrl'].replace('/m/bbs/', '/bbs/')\n bbs.append(bbs_link)\n all_data.append([n, title, file_size])\n\n jobs = [gevent.spawn(get_magnet, bbs_url) for bbs_url in bbs]\n gevent.joinall(jobs)\n magnet_urls = [job.value for job in jobs]\n \n for n, d in enumerate(all_data):\n d.append(magnet_urls[n])\n\n all_data.insert(0, ['No', 'Title', 'File size', 'Magnet'])\n table = AsciiTable(all_data)\n for n in range(len(all_data)):\n table.justify_columns[n] = 'center'\n print(table.table)\n\n\nif __name__ == '__main__':\n print_torrent_table()\n","sub_path":"tocorps.py","file_name":"tocorps.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"649036389","text":"import requests\nimport csv\nimport pandas as pd\nimport time\nfrom datetime import datetime\nimport os\nimport subprocess\n\n# check and read env variables\nMANDATORY_ENV_VARS = [\"EE_token\", \"EE_url\", \"numAttempts\", \"delayBetweenAttempts\", \"delayBetweenPings\", \"numLinesInCSV\",\"debugLevel\"]\n\nfor var in MANDATORY_ENV_VARS:\n if var not in os.environ:\n raise EnvironmentError(\"Failed because {} is not set.\".format(var))\n\nEE_token = os.environ[\"EE_token\"]\nEE_url = 
os.environ[\"EE_url\"]\nnumAttempts = int(os.environ[\"numAttempts\"]) # how many times it tries to get the data before giving up. With every attempt data for a longer period is extracted\ndelayBetweenAttempts = int(os.environ[\"delayBetweenAttempts\"]) # measured in seconds\ndelayBetweenPings = int(os.environ[\"delayBetweenPings\"]) # measured in seconds -> how often in normal times it should retrieve data\nnumLines = int(os.environ[\"numLinesInCSV\"])\ndebugLevel = int(os.environ[\"debugLevel\"])\n\n\n# making sure that they make sense\nassert (len(EE_token) >= 30)\nassert (len(EE_url) >= 30)\nassert (numAttempts >= 5)\nassert (delayBetweenAttempts >= 0)\nassert (delayBetweenPings >= 20)\nassert (numLines >= 5000)\n\n# testing only - to be removed\nif debugLevel >= 2:\n print(\"EE_token:\", EE_token, \" EE_url:\", EE_url, \" numAttempts:\",numAttempts, \" delayBetweenAttempts:\",\n delayBetweenAttempts, \" delayBetweenPings:\", delayBetweenPings, \"debugLevel\", debugLevel)\n\n# init\nstep = 0\nstopFL = False\nstartFL = True\nstartNewDF = True\nsleepEnabledFL = True\nerrorCount = 0\n\ncsv.register_dialect('pipes', lineterminator='\\r\\n', delimiter=',')\n\nwhile (not stopFL) and errorCount < numAttempts:\n\n if sleepEnabledFL:\n time.sleep(delayBetweenPings)\n step = step + 1\n else:\n time.sleep(delayBetweenAttempts)\n\n\n reqStr=\"{}/gws/wfs?authkey={}&service=WFS&version=1.1.0\\\n &request=GetFeature&outputformat=csv&typeName=exactAIS:LVI\\\n &cql_filter=ts_insert_utc>=dateFormat('yyyyMMddHHmmss',currentDate('-PT{}M{}S'))\"\\\n .format(EE_url, EE_token, 5 if startFL else 0, delayBetweenPings + delayBetweenAttempts*errorCount+15)\n\n if debugLevel > 0:\n print(reqStr)\n\n try:\n resp = requests.get(reqStr, timeout=(5,10))\n except requests.exceptions.Timeout:\n # set up for a retry, or continue in a retry loop\n errorCount = errorCount + 1\n sleepEnabledFL = False\n print (\"Timeout waiting for response!, Attempts:\",errorCount)\n except requests.exceptions.TooManyRedirects:\n # Tell the user\n print (\"Too many redirects, Attempts:\", errorCount)\n errorCount = errorCount + 1\n sleepEnabledFL = False\n except requests.exceptions.RequestException as e:\n # catastrophic error. retry up to the number of Max retries\n errorCount = errorCount + 1\n print(\"Request exception:\", e , \" Attempt:\", errorCount)\n sleepEnabledFL = False\n except:\n errorCount = errorCount + 1\n print(\"Unknown error! 
Attempt:\", errorCount)\n sleepEnabledFL = False\n else:\n # in case of no errors extraction of data may begin\n if resp.status_code != 200:\n # This means something went wrong and although the request was received OK the server is complaining.\n print('error in response, status code: {}'.format(resp.status_code))\n errorCount = errorCount + 1\n sleepEnabledFL = True\n else:\n\n # everything is fine so get extracting\n\n try:\n\n csv_reader = csv.reader(resp.text.splitlines(), dialect='pipes')\n headr = next(csv_reader, None)\n if startNewDF :\n df=pd.DataFrame([x for x in csv_reader], columns=headr)\n startNewDF = False\n else:\n df=df.append(pd.DataFrame([x for x in csv_reader], columns=headr)).drop_duplicates()\n startFL = False\n dflen = len(df)\n print(step,\":\",dflen)\n\n # reset error handling parameters\n sleepEnabledFL = True\n errorCount = 0\n if dflen > numLines:\n dateTimeObj = datetime.now()\n filePath = \"/data/API/shipUpdates{}_{}_{}_{}_{}.csv\"\\\n .format(dateTimeObj.year,\n dateTimeObj.month,\n dateTimeObj.day,\n dateTimeObj.hour,\n dateTimeObj.minute)\n df.to_csv(filePath)\n print(\"Saved to file:\", filePath)\n # save to s3\n subprocess.Popen('aws s3 --profile alexS3 cp {} s3://ungp-poc/API/updatesTest/'.format(filePath), shell = True)\n print('{} Saved to S3'.format(filePath))\n startNewDF = True\n\n except:\n print(\"Error in trying to extract the csv from requests or create DF! Skipping the chunk and trying the next one.\")\n\n","sub_path":"api_update.py","file_name":"api_update.py","file_ext":"py","file_size_in_byte":5077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"218309013","text":"from common import logger\nimport sys\n\nclass Decoder:\n def __init__(self):\n logger.info(\"A decoder is created\")\n\n @staticmethod\n def similarity(word1, word2, encoder):\n return Decoder.jaccardSimilarity(word1, word2, encoder)\n\n @staticmethod\n def rankFOUs(fous):\n return Decoder.centroidBasedRankFOUs(fous)\n\n @staticmethod\n def jaccardSimilarity(word1, word2, encoder):\n logger.info(\"Start jaccardSimilarity function\")\n (fou1, fou2) = (encoder.computeFOU(word1[0]), encoder.computeFOU(word2[0]))\n sim = 0\n intersect = 0\n union = 0\n (lower1, upper1) = (fou1[0], fou1[2])\n (lower2, upper2) = (fou2[0], fou2[2])\n size = len(lower1)\n for i in xrange(size):\n intersect += min(lower1[i], lower2[i]) + min(upper1[i], upper2[i])\n union += max(lower1[i], lower2[i]) + max(upper1[i], upper2[i])\n sim = float(intersect) / union\n logger.debug(\"The Similarity of two FOUs is {0:.15f}\".format(sim))\n logger.info(\"End jaccardSimilarity function\")\n return sim\n\n @staticmethod\n def centroidBasedRankFOUs(fous):\n logger.info(\"Start centroidBasedRankFOUs function\")\n sortedFOUs = sorted(fous, key=lambda elem: -sum(elem[1])/2)\n logger.info(\"End centroidBasedRankFOUs function\")\n return sortedFOUs\n","sub_path":"src/cww/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"378779133","text":"#!/usr/local/bin/python3\n# -*- coding: utf-8 -*-\n# 203. 
Remove Linked List Elements \n\n__author__ = 'Libao Jin'\n__date__ = '03/22/2017'\n\nimport time\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def removeElements(self, head, val):\n \"\"\"\n :type head: ListNode\n :type val: int\n :rtype: ListNode\n \"\"\"\n cur = head\n\n while cur and cur.val == val:\n cur = head = cur.next\n while cur:\n if cur.next:\n cur_next_val = cur.next.val\n cur_val = cur.val\n if cur_next_val == val:\n cur.next = cur.next.next\n continue\n cur = cur.next\n return head\n\n def node2list(self, l):\n a = []\n while l is not None:\n a.append(l.val)\n l = l.next\n a.reverse()\n return a\n\n def list2node(self, a):\n l = ListNode(a.pop())\n current_node = l\n while len(a) > 0:\n current_node.next = ListNode(a.pop())\n current_node = current_node.next\n return l\n \n\n def print_list_node(self, l):\n while l is not None:\n print(l.val)\n l = l.next\n\n def test(self):\n start_time = time.time()\n head = self.list2node([1,1])\n val = 1\n result = self.removeElements(head, val)\n result = self.node2list(result)\n end_time = time.time()\n print(result)\n print('Elapsed time: {0:.6f}'.format(end_time - start_time))\n\nif __name__ == '__main__':\n s = Solution()\n s.test()\n","sub_path":"solutions/203_Remove_Linked_List_Elements.py","file_name":"203_Remove_Linked_List_Elements.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"450585370","text":"#pylint: disable=E1120\nfrom django.conf.urls.defaults import patterns, url\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth.decorators import login_required\n\n\ndef urls():\n urlpatterns = patterns('',\n url(r'^$','manaia.views.AreaCliente', name=\"areacliente\"),\n url(r'login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html','authentication_form': AuthenticationForm}, name='login'),\n url(r'logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'},name=\"logout\"),\n url(r'^arquivos/$','manaia.views.AreaClienteAjax', name=\"viewareacliente\"),\n url(r'^pesquisar/$','manaia.views.PesquisaAreaClienteAjax', name=\"viewareacliente\"),\n )\n \n return urlpatterns","sub_path":"manaia/cliente/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"348740053","text":"import tornado.web\nimport time, xml.etree.ElementTree as XMLData\nimport DBOperation\nimport tornado.httpclient\nimport SendMail\nMsgOperation = DBOperation.MessageOperation()\nsendMail = SendMail.SendMail()\nrespXML = \"\"\"<xml>\n<ToUserName><![CDATA[%s]]></ToUserName>\n<FromUserName><![CDATA[%s]]></FromUserName>\n<CreateTime>%s</CreateTime>\n<MsgType><![CDATA[%s]]></MsgType>\n<Content><![CDATA[%s]]></Content>\n<FuncFlag>0</FuncFlag>\n</xml>\"\"\"\nclass MainHandler(tornado.web.RequestHandler):\n \n def get(self):\n resp = self.get_argument('echostr')\n self.write(resp) \n def post(self):\n \n reqXMLData = self.request.body\n root = XMLData.fromstring(reqXMLData)\n global reqData\n reqData = {}\n for child in root:\n reqData[child.tag] = child.text\n print(reqData[\"MsgType\"])\n MsgOperation.process()\n \n if('text' == reqData[\"MsgType\"]):\n self.getText()\n MsgOperation.createReceiveMsgTable()\n MsgOperation.insertRequestTextMessage(reqData)\n if('event' == reqData[\"MsgType\"]):\n if('subscribe' == reqData[\"Event\"]):\n MsgOperation.createReceiveMsgTable()\n MsgOperation.insertSubscribeMessage(reqData)\n 
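# let the maintainer know by email that a new follower subscribed\n 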
sendMail.SendMailOperation(\"yangfan@zhaohuobao.cn\",\"subscribeInformation\",\"Have new subscribeInformation\")\n respData = respXML % (reqData[\"FromUserName\"],reqData[\"ToUserName\"],str(int(time.time())),\"text\",\"欢迎关注段子微信号。\")\n self.write(respData)\n \n @tornado.web.asynchronous\n def getText(self):\n http = tornado.httpclient.AsyncHTTPClient()\n http.fetch(\"http://brisk.eu.org/api/joke.php\",\n callback=self.on_response)\n def on_response(self, response):\n if response.error:\n print(\"resp error\")\n else:\n respText = response.body.decode()\n print(\"resp :\" + respText)\n respData = respXML % (reqData[\"FromUserName\"],reqData[\"ToUserName\"],str(int(time.time())),\"text\",respText)\n self.write(respData)\n self.finish()\n \n \n \n","sub_path":"HandlerMessage.py","file_name":"HandlerMessage.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"459475894","text":"\"\"\"\n10_3_guest.py\n\nWrite a program that prompts the user for their name. When they\nrespond, write their name to a file called guest.txt.\n\nCreated: 3-27-19\n@author: Brian Jacobe\n\"\"\"\n\nfilename = \"guest.txt\"\nprompt = \"Please enter your name here.\"\nuser_input = input(prompt)\n\nif user_input != \"\":\n\twith open(filename, 'w') as file_object:\n\t\tfile_object.write(user_input)\n","sub_path":"Python Crash Course/chapter_10/10_3_guest.py","file_name":"10_3_guest.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"437477779","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass DomainDiscriminator(nn.Module):\n def __init__(self, num_classes=6, input_size=768 * 1,\n hidden_size=768, num_layers=3, dropout=0.1):\n super(DomainDiscriminator, self).__init__()\n self.num_layers = num_layers\n self.num_classes = num_classes\n hidden_layers = []\n for i in range(num_layers):\n if i == 0:\n input_dim = input_size\n else:\n input_dim = hidden_size\n hidden_layers.append(nn.Sequential(\n nn.Linear(input_dim, hidden_size),\n nn.ReLU(), nn.Dropout(dropout)\n ))\n hidden_layers.append(nn.Linear(hidden_size, num_classes))\n self.hidden_layers = nn.ModuleList(hidden_layers)\n\n def forward(self, x):\n # pass through every hidden block, then the final classification layer\n for i in range(self.num_layers):\n x = self.hidden_layers[i](x)\n logits = self.hidden_layers[-1](x)\n log_prob = F.log_softmax(logits, dim=1)\n return log_prob","sub_path":"discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"63740900","text":"import argparse\nimport os\nimport logging\nimport requests\nimport json\nfrom requests_aws4auth import AWS4Auth\nfrom time import gmtime, strftime\nfrom Api_senario import *\n\nCREDENTIALS_GW = \"get-credentials\"\nAWS_REGION = \"eu-west-1\"\nFLOW_METHOD_BUILD = \"_flow\"\nRESPONSE_METHOD_BUILD = \"_response\"\nEVENT_ID = \"test_event\"\nAPI_BASE_URL_ILQA = \"https://e5ys3jg0g0.execute-api.eu-west-1.amazonaws.com\"\nAPI_BASE_URL_MOMDEVIL = \"https://gtauwa5wxf.execute-api.eu-west-1.amazonaws.com\"\nRESOURCE_URL_BASE_MOMDEVIL = 'https://d2cbrq8szn9kw3.cloudfront.net/mymoments/userData'\nRESOURCE_URL_BASE_ILQA = \"https://d3b9irng3e6axo.cloudfront.net/mymoments/userData\"\n\ndef mom_analytics_trigger(request_url, data, email_auth, cred_url):\n\n # creating the response auth for 
AWS\n cred_payload = {'email': email_auth}\n logging.debug(\"Trying to get authentication for {}\".format(cred_payload))\n aws_auth_response = requests.request(\"POST\", cred_url, data=json.dumps(cred_payload))\n # checking if the auth payload was created properly\n if not aws_auth_response.ok:\n logging.warning(\"The security token included in the request is invalid\")\n # converting the response to auth payload\n aws_auth = aws_auth_response.json()\n logging.debug(\"got authentication result {}\".format(json.dumps(aws_auth)))\n auth = AWS4Auth(aws_auth[\"AccessKeyId\"], aws_auth[\"SecretKey\"], AWS_REGION, 'execute-api',\n session_token=aws_auth[\"SessionToken\"])\n logging.debug(\"Got authentication\")\n # actually invoke the lambda\n\n response = requests.request(\"POST\", request_url, data=data, auth=auth)\n if not response.ok:\n logging.warning(\"The lambda was not correctly invoked!\")\n #else:\n # globals().get(method_call)(response.json())\n return response\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description=\"MyMoments Trigger Analytics\")\n\n parser.add_argument(\"--payload\", default=\" { \\\"users\\\" : [{ \\\"email\\\" : \\\"flow_test1@gmail.com\\\" , \\\"password\\\" : \\\"123456\\\" },{ \\\"email\\\" : \\\"flow_test2@gmail.com\\\" , \\\"password\\\" : \\\"123456\\\" }]} \",\n help=\"Enter payload for request in form: {\\\"users\\\": [{ \\\"email\\\" : \\\"example@agt.com\\\" , \\\"password\\\" : \\\"123456\\\"}]} \")\n parser.add_argument(\"--cluster\", default=\"momdevil\", help=\"Enter cluster env\")\n args = parser.parse_args()\n\n logging.basicConfig(filename = '/var/log/continuous.api.tests.log', level = logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n\n if 'ilqa' in args.cluster:\n full_flow(json.loads(args.payload), args.cluster,API_BASE_URL_ILQA)\n elif 'momdevil' in args.cluster:\n full_flow(json.loads(args.payload), args.cluster,API_BASE_URL_MOMDEVIL)\n","sub_path":"Avisrur-git/Api_Tests/Api_Test.py","file_name":"Api_Test.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"540379314","text":"from tkinter import *\r\nfrom boards import *\r\n\r\nclass GameLogic(object):\r\n def __init__(self, width = 0, height = 0):\r\n self.width = width\r\n self.height = height\r\n self.root = Tk()\r\n self.canvas = Canvas(self.root, width = self.width, height = self.height)\r\n self.canvas.pack()\r\n\r\n self.floormap = Map(self.canvas)\r\n self.hero = Hero(self.canvas)\r\n\r\n self.canvas.bind(\"\", self.key_press)\r\n\r\n self.canvas.focus_set()\r\n self.root.mainloop()\r\n\r\n def key_press(self, e):\r\n if e.keycode == 8320768: # up\r\n self.hero.draw_character(self.hero.x, self.hero.y - 1, self.hero.character_img_up)\r\n elif e.keycode == 8255233: # down\r\n self.hero.draw_character(self.hero.x, self.hero.y + 1, self.hero.character_img_down)\r\n elif e.keycode == 8189699: # right\r\n self.hero.draw_character(self.hero.x + 1, self.hero.y, self.hero.character_img_right)\r\n elif e.keycode == 8124162: # left\r\n self.hero.draw_character(self.hero.x - 1, self.hero.y, self.hero.character_img_left)\r\n\r\nclass Map(object):\r\n def __init__(self, canvas):\r\n self.canvas = canvas\r\n self.floor = PhotoImage(file = \"/Users/MrFox/OneDrive/greenfox/the_wanderer_rpg/img/floor.gif\")\r\n self.wall = PhotoImage(file = \"/Users/MrFox/OneDrive/greenfox/the_wanderer_rpg/img/wall.gif\")\r\n 
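# map_1 comes from boards: a 2D grid where 0 marks a floor tile and any other value a wall\r\n 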
self.level_map = map_1\r\n self.map_display(self.level_map)\r\n\r\n\r\n def draw_floor_tile(self, x, y):\r\n self.canvas.create_image(x, y, anchor=NW, image=self.floor)\r\n\r\n def draw_wall_tile(self, x, y):\r\n self.canvas.create_image(x, y, anchor=NW, image=self.wall)\r\n\r\n def map_display(self, board):\r\n tile = 72\r\n for row in range(len(board)):\r\n for cell in range(len(board[row])):\r\n if board[cell][row] == 0:\r\n self.draw_floor_tile(row*tile, cell*tile)\r\n else:\r\n self.draw_wall_tile(row*tile, cell*tile)\r\n\r\nclass Character(object):\r\n def __init__(self, canvas):\r\n self.canvas = canvas\r\n self.x = 0\r\n self.y = 0\r\n self.tile = 72\r\n self.character_delete = 0\r\n\r\n def draw_character(self, x, y, character_img):\r\n self.canvas.delete(self.character_delete)\r\n self.character_delete = self.canvas.create_image(x*self.tile, y*self.tile, anchor=NW, image=character_img)\r\n\r\nclass Hero(Character):\r\n def __init__(self, canvas):\r\n super().__init__(canvas)\r\n self.character_img_down = PhotoImage(file = \"/Users/MrFox/OneDrive/greenfox/the_wanderer_rpg/img/hero-down.gif\")\r\n self.character_img_up = PhotoImage(file = \"/Users/MrFox/OneDrive/greenfox/the_wanderer_rpg/img/hero-up.gif\")\r\n self.character_img_right = PhotoImage(file = \"/Users/MrFox/OneDrive/greenfox/the_wanderer_rpg/img/hero-right.gif\")\r\n self.character_img_left = PhotoImage(file = \"/Users/MrFox/OneDrive/greenfox/the_wanderer_rpg/img/hero-left.gif\")\r\n\r\n self.draw_character(self.x, self.y, self.character_img_down)\r\n\r\n def draw_character(self, x, y, character_img):\r\n floormap = Map(self.canvas)\r\n if 0 <= x <= 9 and 0 <= y <= 9:\r\n if floormap.level_map[y][x] == 0:\r\n self.x = x\r\n self.y = y\r\n self.canvas.delete(self.character_delete)\r\n self.character_delete = self.canvas.create_image(x*self.tile, y*self.tile, anchor=NW, image=character_img)\r\n\r\ngame = GameLogic(\"720\", \"720\")\r\n","sub_path":"the_wanderer.py","file_name":"the_wanderer.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"127122893","text":"import matplotlib.pyplot as plt\nimport scipy.io as sio\n\n\n\ndef plot():\n\n data = sio.loadmat(\"result.mat\", squeeze_me=True)\n x, y = data[\"VV\"], data[\"RR\"]\n \n plt.plot(x, y, '-o')\n plt.xlabel(\"Bias (V)\")\n plt.ylabel(\"Ty (V/?)\")\n plt.grid()\n plt.show()\n","sub_path":"src/gui/res/PlotApp.py","file_name":"PlotApp.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"416171093","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nclass heat():\n def __init__(self, k, dx, dt, nx, totT, x1, x2):\n self.dx = dx\n self.dt = dt\n self.nx = nx\n self.nt = int(totT/dt)\n self.u = np.zeros(nx)\n self.u[x1:x2] = 1\n self.alpha = k*dt/(dx**2)\n print(self.alpha)\n self.x = np.linspace(1,self.nx,self.nx)*self.dx\n self.M = np.eye(nx)*-2\n for i in range(nx):\n if i != 0:\n self.M[i][i-1] = 1\n if i != (nx-1):\n self.M[i][i+1] = 1\n self.M = np.linalg.inv(np.eye(self.nx)-self.alpha*self.M)\n def update(self):\n self.u = np.dot(self.M, self.u)\n #print(self.u)\n def calculate(self, savefig = False):\n img_num = 0\n for i in tqdm(range(self.nt)):\n self.update()\n if savefig and (i%int(self.nt/100)==0):\n plt.clf()\n plt.ylim(-1.1,1.1)\n plt.title(\"$\\\\beta={},t={:.2f}$\".format(self.alpha,self.dt*i))\n plt.plot(self.x, self.u)\n 
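# saves roughly one frame per 1% of the run; assumes the ./img directory already exists\n 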
plt.savefig(\"./img/{}.jpg\".format(img_num))\n img_num += 1\n def snapshot(self):\n plt.ylim(-1.1,1.1)\n plt.plot(self.x, self.u)\n plt.show()\n\n\nh = heat(0.5,0.1,1,1000,1000,450,550)\nh.calculate(True)","sub_path":"Project/TeX Code/code/implicit.py","file_name":"implicit.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"626765684","text":"import glob\nimport re\n\ndef file_set():\n # set all the files from the extracted archive into a list\n return glob.glob('./smallset/*.msg')\n\ndef date_search(string):\n # regex looks for RFC spec date/time matches; see RFC 5322, 3.3\n date_regex = re.compile(r'^Date:\\s*(\\D{3})?,?\\s*(\\d{1,2})\\s*(\\D{3})\\s*(\\d{4}).*')\n return date_regex.search(string)\n\ndef sender_search(string):\n sender_regex = re.compile(r'^From:\\s*')\n return sender_regex.match(string)\n\ndef email_search(string):\n email_regex = re.compile(r'\\s])')\n return re.search(email_regex, string).group(1)\n\ndef subject_search(string):\n subject_regex = re.compile(r'^Subject:\\s*', re.M)\n return subject_regex.match(string)\n\ndef main():\n # iterate through each file in the list\n for file_path in file_set():\n # open file for reading; create new file for output\n with open(file_path, 'r') as email_msg, open('output.txt', 'a') as output_file:\n # include file name\n output_file.write('\\n' + email_msg.name + '\\n')\n\n #iterate through each line\n for line in email_msg:\n # read each line and use regex to match 'Sent:' header in message\n date_match = date_search(line)\n if date_match:\n # remove Date key and write value to file\n date = line.split('Date:')[1].strip() + '\\n'\n output_file.write(date)\n\n # read each line and use regex to match 'From:' header in message\n sender_match = sender_search(line)\n if sender_match:\n # find email in the line and write to file\n email = email_search(line) + '\\n'\n output_file.write(email)\n\n # read each line and use regex to match 'Subject: ' lines\n subject_match = subject_search(line)\n if subject_match:\n # remove Subject key and write value to file\n subject = line.split('Subject:')[1].strip() + '\\n'\n output_file.write(subject)\n\nif __name__ == \"__main__\":\n main()\n \n \n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"459586693","text":"fileName = input(\"Enter file name: \")\nfp = open(fileName,\"r\")\nvalues = []\nintegers = []\nfor line in fp:\n values = line.strip().split(' ')\n for el in values:\n integers.append(int(el))\n\ndef find_subsequence(integers,n,inputparam):\n currentHigh = 0\n highestSum = 0\n absint = []\n n = abs(n)\n if inputparam == \"values\":\n if (len(integers) <= n):\n n = len(integers)-1\n for i in range(0,len(integers)-n):\n highestSum = 0\n for j in range(i,i+n):\n highestSum = highestSum + integers[j]\n if highestSum < 0:\n highestSum = 0\n elif (currentHigh < highestSum):\n currentHigh = highestSum\n return currentHigh\n elif inputparam == \"differences\":\n for k in range(0,len(integers)-1):\n absint.append(abs(integers[k+1]-integers[k]))\n if(len(absint)<=(n-1)):\n n = len(absint)\n for i in range(0,len(integers)-(n-1)):\n highestSum = 0\n for j in range(i,i+(n-1)):\n highestSum = highestSum + absint[j]\n if highestSum < 0:\n highestSum = 0\n elif (currentHigh < highestSum):\n currentHigh = highestSum\n return currentHigh\n\nn = int(input(\"Enter 
max length of subsequence: \"))\ninputparam = input(\"Third parameter: \")\nprint(find_subsequence(integers,n,inputparam))","sub_path":"assignment/data/find_subsequence.py","file_name":"find_subsequence.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"440401580","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 25 12:48:36 2019\r\n\r\n@author: Kajal\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n\r\nm = 100\r\nX = 8 * np.random.randn(m, 1)\r\ny = 2 * X ** 2 + X + np.random.randn(m, 1)\r\n\r\n\r\nplt.scatter(X ,y)\r\n#zoom the graph between limits\r\nplt.axis([-3, 3 ,0 ,9])\r\nplt.show()\r\n\r\n\r\n#apply polynomial function to create 2nd order poly\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npoly = PolynomialFeatures(degree=2, include_bias = False)\r\nX_poly = poly.fit_transform(X)\r\n\r\n\r\n#apply linearregression algo to draw curve\r\nfrom sklearn.linear_model import LinearRegression\r\nlin_reg = LinearRegression()\r\nlin_reg.fit(X_poly, y)\r\n\r\n\r\nX_new = np.linspace(-3 ,3 ,100).reshape(-1, 1)\r\nX_new_poly = poly.fit_transform(X_new)\r\ny_new = lin_reg.predict(X_new_poly)\r\n\r\nplt.scatter(X ,y)\r\nplt.plot(X_new, y_new, c = \"r\")\r\nplt.axis([-3, 3 ,0 ,9])\r\nplt.show()\r\n\r\n#b1 , b2\r\nlin_reg.coef_\r\n\r\n#b0\r\nlin_reg.intercept_\r\n\r\n","sub_path":"poly_reg.py","file_name":"poly_reg.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"652485468","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\n\nfrom .base import FunctionalTest, page_titles, DEFAULT_PWD, PROJECT_NAME\nfrom vocab.models import (\n VocabEntry, VocabContext, VocabContextEntry,\n VocabSource\n)\n\nUser = get_user_model()\n\npage_titles.update({\n \"page_vocab_user_dashboard_title_en\": \"{0} | {1}\".format(\"Dashboard\", PROJECT_NAME),\n \"page_vocab_entries_title_en\": \"{0} | {1}\".format(\"Vocab entries\", PROJECT_NAME),\n \"page_vocab_entry_edit_title_en\": \"{0} | {1}\".format(\"Edit vocab entry\", PROJECT_NAME),\n \"page_vocab_context_tag_title_en\": \"{0} | {1}\".format(\"Edit and tag context\", PROJECT_NAME),\n \"page_vocab_sources_title_en\": \"{0} | {1}\".format(\"Sources\", PROJECT_NAME),\n \"page_vocab_source_dashboard_title_en\": \"{0} | {1}\".format(\" - Dashboard\", PROJECT_NAME),\n \"page_vocab_source_create_title_en\": \"{0} | {1}\".format(\"Create source\", PROJECT_NAME),\n \"page_vocab_source_edit_title_en\": \"{0} | {1}\".format(\"Edit source\", PROJECT_NAME),\n})\n\n\nclass TestCommon(FunctionalTest):\n\n def setUp(self):\n super(TestCommon, self).setUp()\n self.user = User.objects.create_user(\n username=\"cfs7\",\n first_name=\"Christopher\",\n last_name=\"Sanders\",\n email=\"cfs7@cfs.com\",\n password=DEFAULT_PWD\n )\n\n def fill_and_submit_vocab_entry_form(self, entry=None, desc=None):\n if entry is not None:\n vocab_entry_field = self.get_element_by_id(\"entry\")\n vocab_entry_field.clear()\n vocab_entry_field.send_keys(entry)\n if desc is not None:\n vocab_source_desc_field = self.get_element_by_id(\"description\")\n vocab_source_desc_field.clear()\n vocab_source_desc_field.send_keys(desc)\n 
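# submit the form once the requested fields have been populated\n 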
self.get_submit_button().click()\n\n def fill_and_submit_vocab_entry_modal_form(self, entry=None, desc=None):\n modal_id = \"vocab-entry-create-modal\"\n self.open_modal(\"sidebar-nav-vocab-entry-create\", modal_id)\n self.fill_and_submit_vocab_entry_form(entry, desc)\n self.wait.until(\n EC.invisibility_of_element_located((By.ID, modal_id))\n )\n\n def fill_and_submit_vocab_source_form(self, name=None, desc=None):\n if name is not None:\n vocab_source_name_field = self.get_element_by_id(\"name\")\n vocab_source_name_field.clear()\n vocab_source_name_field.send_keys(name)\n if desc is not None:\n vocab_source_desc_field = self.get_element_by_id(\"description\")\n vocab_source_desc_field.clear()\n vocab_source_desc_field.send_keys(desc)\n self.get_submit_button().click()\n\n def get_vocab_entry_autocomplete_text(self, vocab_entry):\n return \"{0} - {1}\".format(vocab_entry.language, vocab_entry.entry)\n\n\nclass VocabEntryTest(TestCommon):\n\n def setUp(self):\n super(VocabEntryTest, self).setUp()\n # self.vocab_entry = VocabEntry.objects.create(language=\"en\", entry=\"something\")\n self.vocab_entry_data = {\n \"language\": \"en\",\n \"entry\": \"inextricable\",\n \"description\": \"hello\"\n }\n\n def test_create_entry(self):\n\n # Create a vocab entry by filling out and submitting the modal form.\n self.browser.get(\"{0}{1}\".format(\n self.live_server_url,\n reverse(\"vocab:vocab_user_dashboard\"))\n )\n self.login_user(self.user.username)\n self.page_load(page_titles[\"page_vocab_user_dashboard_title_en\"])\n self.open_sidebar()\n self.fill_and_submit_vocab_entry_modal_form(entry=self.vocab_entry_data[\"entry\"])\n\n # VocabEntry object created.\n vocab_entry = VocabEntry.objects.get(\n language=self.vocab_entry_data[\"language\"],\n entry=self.vocab_entry_data[\"entry\"]\n )\n\n # New vocab entry appears in the search autocomplete.\n self.search_autocomplete_by_language(\n language=vocab_entry.language,\n search_text=vocab_entry.entry,\n )\n\n def test_delete_entry(self):\n\n vocab_entry = VocabEntry.objects.create(\n creator=self.user,\n language=self.vocab_entry_data[\"language\"],\n entry=self.vocab_entry_data[\"entry\"],\n )\n self.browser.get(\"{0}{1}\".format(\n self.live_server_url,\n reverse(\"vocab:vocab_user_dashboard\"))\n )\n self.login_user(self.user.username)\n self.page_load(page_titles[\"page_vocab_user_dashboard_title_en\"])\n self.open_sidebar()\n\n # Search for entry and select it.\n vocab_entry_search_result = self.search_autocomplete_by_language(\n language=vocab_entry.language,\n search_text=vocab_entry.entry,\n )\n vocab_entry_search_result.click()\n self.page_load(\"Vocab entry: {0} | {1}\".format(\n vocab_entry.entry,\n PROJECT_NAME\n ))\n\n # Delete vocab entry through modal.\n self.get_element_by_id(\"sidebar-nav-vocab-entry-edit\").click()\n self.page_load(page_titles[\"page_vocab_entry_edit_title_en\"])\n self.open_modal(trigger_id=\"vocab-entry-delete-trigger\", modal_id=\"vocab-entry-delete-modal\")\n self.get_element_by_id(\"vocab-entry-delete-ok\").click()\n self.wait.until(\n EC.invisibility_of_element_located((By.ID, \"vocab-entry-delete-modal\"))\n )\n self.page_load(page_titles[\"page_vocab_user_dashboard_title_en\"])\n self.assertFalse(\n VocabEntry.objects.filter(\n creator=self.user,\n language=self.vocab_entry_data[\"language\"],\n entry=self.vocab_entry_data[\"entry\"]\n ).exists()\n )\n\n\nclass VocabSourceTest(TestCommon):\n\n def setUp(self):\n super(VocabSourceTest, self).setUp()\n self.vocab_source_data = {\n \"name\": \"Nacidos de la 
bruma: El héroe de las eras\",\n \"description\": \"Tercer libro de la saga Nacidos de la bruma\"\n }\n\n def test_create_source(self):\n self.browser.get(\"{0}{1}\".format(\n self.live_server_url,\n reverse(\"vocab:vocab_sources_auth\")\n ))\n self.login_user(self.user.username)\n self.page_load(page_titles[\"page_vocab_sources_title_en\"])\n self.open_sidebar()\n self.get_element_by_id(\"sidebar-nav-vocab-source-create\").click()\n self.page_load(page_titles[\"page_vocab_source_create_title_en\"])\n # New source link is highlighted.\n self.assertIn(\n \"active\",\n self.get_element_by_id(\"sidebar-nav-vocab-source-create\").get_attribute(\"class\")\n )\n self.assertFalse(VocabSource.objects.filter(name=self.vocab_source_data[\"name\"]).exists())\n self.fill_and_submit_vocab_source_form(\n name=self.vocab_source_data[\"name\"],\n desc=self.vocab_source_data[\"description\"]\n )\n # New source exists.\n vocab_source = VocabSource.objects.get(\n creator=self.user,\n name=self.vocab_source_data[\"name\"]\n )\n self.page_load(\"{0}{1}\".format(\n vocab_source.name,\n page_titles[\"page_vocab_source_dashboard_title_en\"]\n ))\n\n def test_delete_source(self):\n vocab_source = VocabSource.objects.create(\n creator=self.user,\n name=self.vocab_source_data[\"name\"],\n description=self.vocab_source_data[\"description\"]\n )\n self.browser.get(\"{0}{1}\".format(\n self.live_server_url,\n reverse(\n \"vocab:vocab_source_dashboard\",\n kwargs={\n \"vocab_source_pk\": vocab_source.id,\n \"vocab_source_slug\": vocab_source.slug\n }\n )\n ))\n self.login_user(self.user.username)\n self.page_load(\"{0}{1}\".format(\n vocab_source.name,\n page_titles[\"page_vocab_source_dashboard_title_en\"]\n ))\n self.open_sidebar()\n self.get_element_by_id(\"sidebar-nav-vocab-source-edit\").click()\n self.page_load(page_titles[\"page_vocab_source_edit_title_en\"])\n # Source edit link is highlighted.\n self.assertIn(\n \"active\",\n self.get_element_by_id(\"sidebar-nav-vocab-source-edit\").get_attribute(\"class\")\n )\n\n # Delete source through modal.\n delete_modal_id = \"vocab-source-delete-modal\"\n self.open_modal(trigger_id=\"vocab-source-delete-trigger\", modal_id=delete_modal_id)\n self.get_element_by_id(\"vocab-source-delete-ok\").click()\n self.wait.until(\n EC.invisibility_of_element_located((By.ID, delete_modal_id))\n )\n self.page_load(page_titles[\"page_vocab_sources_title_en\"])\n self.assertFalse(\n VocabSource.objects.filter(\n creator=self.user,\n name=self.vocab_source_data[\"name\"]\n ).exists()\n )\n\n\nclass VocabContextEntryTest(TestCommon):\n\n def add_tag(self, tagbox_id=None, tag=None, return_key=False):\n if tagbox_id and tag:\n css_selector = \"#{0} .tagbox-input\".format(tagbox_id)\n tagbox_input = self.get_element_by_css(css_selector)\n tagbox_input.send_keys(tag)\n if return_key:\n tagbox_input.send_keys(u\"\\ue007\")\n\n def get_tag_xpath(self, tagbox_id=None, tag=None, close=False):\n if tagbox_id and tag:\n if not close:\n xpath = \"//a[contains(., '{0}') and ancestor::div[@id='{1}']]\".format(\n tag,\n tagbox_id\n )\n else:\n # Get tag's close button xpath.\n xpath = \"//a[@class='delete-tag' and preceding-sibling::a[contains(., '{0}')] and ancestor::div[@id='{1}']]\".format(\n tag,\n tagbox_id\n )\n return xpath\n\n def get_highlight_xpath(self, tag):\n xpath = \"//mark[@class='tagged-text' and contains(., '{0}')]\".format(tag)\n return xpath\n\n def tag_hover(self, tag_element):\n hover = ActionChains(self.browser).move_to_element(tag_element)\n hover.perform()\n\n def 
setUp(self):\n super(VocabContextEntryTest, self).setUp()\n self.vocab_source = VocabSource.objects.create(\n creator=self.user,\n source_type=VocabSource.BOOK,\n name=\"A great book\"\n )\n self.vocab_entry_1 = VocabEntry.objects.create(\n creator=self.user,\n language=\"en\",\n entry=\"eat\"\n )\n self.vocab_entry_2 = VocabEntry.objects.create(\n creator=self.user,\n language=\"en\",\n entry=\"pizza\"\n )\n context_text = \"Peter likes to eat pizza on Sunday. Karen likes to eat pizza on Friday\"\n self.vocab_context = VocabContext.objects.create(vocab_source=self.vocab_source, content=context_text)\n\n def test_vocab_context_tag(self):\n vocab_entry_tagbox_id = \"vocab-entry-tagbox\"\n vocab_entry_container_id = \"vocab-entry-tags\"\n vocab_entry_instance_tagbox_id = \"vocab-entry-instance-tagbox\"\n vocab_entry_instance_container_id = \"vocab-entry-instance-tags\"\n self.browser.get(\"{0}{1}\".format(\n self.live_server_url,\n reverse(\"vocab:vocab_context_tag\", kwargs={\"vocab_context_pk\": self.vocab_context.id}))\n )\n self.login_user(self.user.username)\n self.page_load(page_titles[\"page_vocab_context_tag_title_en\"])\n\n # VocabContextEntry objects don't exist yet.\n self.assertFalse(\n VocabContextEntry.objects.filter(\n vocab_context_id=self.vocab_context,\n vocab_entry_id=self.vocab_entry_1\n ).exists()\n )\n self.assertFalse(\n VocabContextEntry.objects.filter(\n vocab_context_id=self.vocab_context,\n vocab_entry_id=self.vocab_entry_2\n ).exists()\n )\n\n # Add tags.\n for vocab_entry in [self.vocab_entry_1]:\n self.add_tag(tagbox_id=vocab_entry_tagbox_id, tag=vocab_entry.entry)\n\n # Click autocomplete link.\n autocomplete_text = self.get_vocab_entry_autocomplete_text(vocab_entry)\n\n self.wait.until(EC.element_to_be_clickable((By.LINK_TEXT, autocomplete_text)))\n vocab_entry_tag = self.get_element_by_link_text(autocomplete_text)\n vocab_entry_tag.click()\n\n # Tag is added and VocabContextEntry record is added.\n vocab_entry_xp = self.get_tag_xpath(tagbox_id=vocab_entry_container_id, tag=vocab_entry.entry)\n self.wait.until(EC.element_to_be_clickable((By.XPATH, vocab_entry_xp)))\n vocab_context_entry = VocabContextEntry.objects.get(\n vocab_context_id=self.vocab_context,\n vocab_entry_id=vocab_entry\n )\n self.assertEqual(list(vocab_context_entry.get_vocab_entry_tags()), [])\n\n # Click on tag and load vocab-entry-instance tagbox.\n self.get_element_by_xpath(vocab_entry_xp).click()\n vocab_entry_instance_tagbox = \"vocab-entry-instance-tagbox\"\n self.wait.until(EC.element_to_be_clickable((By.ID, vocab_entry_instance_tagbox)))\n self.assertTrue(vocab_entry.entry in self.vocab_context.content)\n\n # Add vocab entry instance tag.\n self.add_tag(tagbox_id=vocab_entry_instance_tagbox_id, tag=vocab_entry.entry, return_key=True)\n vocab_entry_instance_xp = self.get_tag_xpath(tagbox_id=vocab_entry_instance_container_id, tag=vocab_entry.entry)\n self.wait.until(EC.element_to_be_clickable((By.XPATH, vocab_entry_instance_xp)))\n vocab_entry_instance_tag = self.get_element_by_xpath(vocab_entry_instance_xp)\n\n # Vocab entry instance is highlighted in text.\n vocab_entry_highlight_xp = self.get_highlight_xpath(tag=vocab_entry.entry)\n vocab_entry_highlighted = self.get_elements_by_xpath(vocab_entry_highlight_xp)\n self.assertEqual(len(vocab_entry_highlighted), 2)\n\n # Vocab entry tags have been saved to VocabContextEntry object.\n self.assertEqual(list(vocab_context_entry.get_vocab_entry_tags()), [vocab_entry.entry])\n\n # Delete vocab instance tag.\n 
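# hover over the tag first so its delete button becomes visible and clickable\n 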
self.tag_hover(tag_element=vocab_entry_instance_tag)\n vocab_entry_instance_close_xp = self.get_tag_xpath(\n tagbox_id=vocab_entry_instance_container_id,\n tag=vocab_entry.entry,\n close=True\n )\n self.wait.until(EC.element_to_be_clickable((By.XPATH, vocab_entry_instance_close_xp)))\n self.get_element_by_xpath(vocab_entry_instance_close_xp).click()\n self.wait.until(EC.invisibility_of_element_located((By.XPATH, vocab_entry_instance_xp)))\n self.assertEqual(list(vocab_context_entry.get_vocab_entry_tags()), [])\n\n # Delete vocab entry tag\n vocab_entry_tag = self.get_element_by_xpath(vocab_entry_xp)\n self.tag_hover(vocab_entry_tag)\n vocab_entry_close_xp = self.get_tag_xpath(\n tagbox_id=vocab_entry_container_id,\n tag=vocab_entry.entry,\n close=True\n )\n self.wait.until(EC.element_to_be_clickable((By.XPATH, vocab_entry_close_xp)))\n self.get_element_by_xpath(vocab_entry_close_xp).click()\n self.wait.until(EC.invisibility_of_element_located((By.XPATH, vocab_entry_xp)))\n self.assertFalse(\n VocabContextEntry.objects.filter(\n vocab_context_id=self.vocab_context,\n vocab_entry_id=vocab_entry\n ).exists()\n )\n\n # Vocab entry instance is not highlighted in text.\n vocab_entry_highlight_xp = self.get_highlight_xpath(tag=vocab_entry.entry)\n vocab_entry_highlighted = self.get_elements_by_xpath(vocab_entry_highlight_xp)\n self.assertEqual(len(vocab_entry_highlighted), 0)\n\n","sub_path":"app/tests/selenium/test_vocab.py","file_name":"test_vocab.py","file_ext":"py","file_size_in_byte":16296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"32616810","text":"from enum import Enum\n\n\nclass AbstractText:\n \"\"\"\n Abstract class to define elasticsearch document schema\n \"\"\"\n\n # define index name here\n index = NotImplemented\n\n # define document schema here\n class Field(str, Enum):\n ID = 'id' # 'id' is required\n\n def __init__(self, dct=None):\n if dct:\n for field in self.Field:\n key = field.value\n if key in dct:\n setattr(self, key, dct[key])\n\n def __repr__(self):\n return f'<{self.__class__.__name__}: {self.__dict__}>'\n\n @classmethod\n def get_all_fields(cls):\n \"\"\"\n return all field names except for 'id'\n \"\"\"\n\n fields = []\n for field in cls.Field:\n if field.name != 'ID':\n fields.append(field.value)\n return fields\n\n\nclass NewsText(AbstractText):\n index = 'news'\n\n class Field(str, Enum):\n ID = 'id'\n TITLE = 'title'\n BODY = 'body'\n\n\nclass BillText(AbstractText):\n index = 'bill'\n\n class Field(str, Enum):\n ID = 'id'\n TITLE = 'title'\n BODY = 'body'\n SUPPL = 'suppl'\n REASON = 'reason'\n","sub_path":"politylink/elasticsearch/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"113858952","text":"'''\nProblem 89\n\nFor a number written in Roman numerals to be considered valid there are basic rules which must be followed. 
Even though the rules allow some numbers to be expressed in more than one way there is always a \"best\" way of writing a particular number.\n\nFor example, it would appear that there are at least six ways of writing the number sixteen:\n\nIIIIIIIIIIIIIIII\nVIIIIIIIIIII\nVVIIIIII\nXIIIIII\nVVVI\nXVI\n\nHowever, according to the rules only XIIIIII and XVI are valid, and the last example is considered to be the most efficient, as it uses the least number of numerals.\n\nThe 11K text file, roman.txt (right click and 'Save Link/Target As...'), contains one thousand numbers written in valid, but not necessarily minimal, Roman numerals; see About... Roman Numerals for the definitive rules for this problem.\n\nFind the number of characters saved by writing each of these in their minimal form.\n'''\n\nimport time\n\nf = open(\"data/roman.txt\", \"r\")\nnumerals = [num.split()[0] for num in f]\n\nnumeral_map = {\n\t'I' : 1,\n\t'V' : 5,\n\t'X' : 10,\n\t'L' : 50,\n\t'C' : 100,\n\t'D' : 500,\n\t'M' : 1000,\n}\n\ndef numeral_to_decimal(numeral):\n\ti, decimal = 0, 0\n\twhile i < len(numeral):\n\t\tif i < len(numeral) - 1 and numeral[i] == 'I' and numeral[i+1] in ['V', 'X']:\n\t\t\tdecimal += numeral_map[numeral[i+1]] - numeral_map[numeral[i]]\n\t\t\ti += 2\n\t\telif i < len(numeral) - 1 and numeral[i] == 'X' and numeral[i+1] in ['L', 'C']:\n\t\t\tdecimal += numeral_map[numeral[i+1]] - numeral_map[numeral[i]]\n\t\t\ti += 2\n\t\telif i < len(numeral) - 1 and numeral[i] == 'C' and numeral[i+1] in ['D', 'M']:\n\t\t\tdecimal += numeral_map[numeral[i+1]] - numeral_map[numeral[i]]\n\t\t\ti += 2\n\t\telse:\n\t\t\tdecimal += numeral_map[numeral[i]]\n\t\t\ti += 1\n\treturn decimal\n\ndef decimal_to_numeral(decimal):\n\tnumeral = ''\n\twhile decimal:\n\t\tif decimal >= 1000:\n\t\t\tnumeral += 'M' * (decimal // 1000)\n\t\t\tdecimal -= (decimal // 1000) * 1000\n\t\telif decimal >= 100:\n\t\t\tif decimal // 100 == 9:\n\t\t\t\tnumeral += 'CM'\n\t\t\telif decimal // 100 >= 5:\n\t\t\t\tnumeral += 'D' + 'C' * ((decimal - 500) // 100)\n\t\t\telif decimal // 100 == 4:\n\t\t\t\tnumeral += 'CD'\n\t\t\telse:\n\t\t\t\tnumeral += 'C' * (decimal // 100)\n\t\t\tdecimal -= (decimal // 100) * 100\n\t\telif decimal >= 10:\n\t\t\tif decimal // 10 == 9:\n\t\t\t\tnumeral += 'XC'\n\t\t\telif decimal // 10 >= 5:\n\t\t\t\tnumeral += 'L' + 'X' * ((decimal - 50) // 10)\n\t\t\telif decimal // 10 == 4:\n\t\t\t\tnumeral += 'XL'\n\t\t\telse:\n\t\t\t\tnumeral += 'X' * (decimal // 10)\n\t\t\tdecimal -= (decimal // 10) * 10\n\t\telse:\n\t\t\tif decimal == 9:\n\t\t\t\tnumeral += 'IX'\n\t\t\telif decimal >= 5:\n\t\t\t\tnumeral += 'V' + 'I' * (decimal - 5)\n\t\t\telif decimal == 4:\n\t\t\t\tnumeral += 'IV'\n\t\t\telse:\n\t\t\t\tnumeral += 'I' * decimal\n\t\t\tdecimal -= decimal\n\treturn numeral\n\ndef roman_numerals():\n\tused_chars = sum([len(num) for num in numerals])\n\tfor i in range(len(numerals)):\n\t\tnumerals[i] = decimal_to_numeral(numeral_to_decimal(numerals[i]))\n\tmin_chars = sum([len(num) for num in numerals])\n\treturn used_chars - min_chars\n\nif __name__ == '__main__':\n\n\tstart = time.time()\n\tprint(roman_numerals())\n\tend = time.time()\n\n\tprint(\"Execution time: %fs\" %(end - start))\n","sub_path":"solutions/roman_numerals.py","file_name":"roman_numerals.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"265803750","text":"import yaml\nfrom collections import ChainMap\n\n\nfrom helpers.logHelpers import createLog\n\nlogger = 
createLog('configHelpers')\n\n\ndef loadEnvFile(runType, fileString):\n \"\"\"Loads configuration details from a specific yaml file.\n\n Arguments:\n runType {string} -- The environment to load configuration details for.\n fileString {string} -- The file string format indicating where to load\n the configuration file from.\n\n Raises:\n YAMLError: Indicates malformed yaml markup in the configuration file\n\n Returns:\n dict -- A dictionary containing the configuration details parsed from\n the specificied yaml file.\n \"\"\"\n envDict = None\n\n if fileString:\n openFile = fileString.format(runType)\n else:\n openFile = 'config.yaml'\n\n try:\n with open(openFile) as envStream:\n try:\n envDict = yaml.full_load(envStream)\n except yaml.YAMLError as err:\n logger.error('{} Invalid! Please review'.format(openFile))\n raise err\n\n except FileNotFoundError as err:\n logger.info('Missing config YAML file! Check directory')\n logger.debug(err)\n\n if envDict is None:\n return {}\n\n return envDict\n\n\ndef loadEnvVars(runType):\n \"\"\"Loads a full set of configuration details from the both the default\n configuration file and any environment specific settings.\n\n Arguments:\n runType {string} -- The current environment.\n\n Returns:\n ChainMap -- A ChainMap object combining the dictionaries returned from\n the default config.yaml file (required) and an environment-specific\n file (optional). The environment-specific details will override any\n settings in the default file.\n \"\"\"\n # Load env variables from relevant .yaml file\n envDict = loadEnvFile(runType, 'config/{}.yaml')\n\n # Overwrite/add any vars in the core config.yaml file\n configDict = loadEnvFile(runType, None)\n\n combinedConfig = ChainMap(envDict, configDict)\n\n return combinedConfig\n\n\ndef setEnvVars(runType):\n \"\"\"Produces a yaml file that can be read by the Lambda deployment process\n from the combined arguments from loadEnvVars\n\n Arguments:\n runType {string} -- The current environment.\n\n Raises:\n IOError: Raised when the method is unable to produce a yaml file due\n to file/directory permission issues.\n \"\"\"\n envVars = loadEnvVars(runType)\n\n try:\n with open('run_config.yaml', 'w') as newConfig:\n yaml.dump(\n dict(envVars),\n newConfig,\n default_flow_style=False\n )\n except IOError as err:\n logger.error(('Script lacks necessary permissions, '\n 'ensure user has permission to write to directory'))\n raise err\n","sub_path":"lambda/sfr-viaf-lookup/helpers/configHelpers.py","file_name":"configHelpers.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"31296659","text":"import re\n\ns = \"12-02-2018\"\n# 32-13-2018\n# dd-mm-yyyy\nr = re.compile(r\"^([0-9]{2})-([0-9]{2})-([0-9]{4})$\")\n# # l = re.findall(r,s)\n'''# # print(l)\nm = re.search(r,s)\nprint(m)\nif m:\n\tprint(m.group())\nelse:\n \tprint(\"pattern not found\")\n'''\n#Search returns a object of call match if pattern found \n# else None \n\ns1 = \"(+91)7123456789\"\n# s1 = \"7123456789\"\n\n# l = re.findall(\"[A-Za-z]+[0-9]?\",\"Python\")\n# print(l)\n'''\nr = re.compile(r\"^(\\(\\+91\\))?[6-9][0-9]{9}$\")\nm = re.search(r,s1)\nif m:\n \tprint(m.group())\nelse:\n \tprint(\"Pattern not found\")\n\n# url = \"www.zekelabs.com\"\n\n# click here\n\n# s = \"the code link is https://www.github.com, http://www.zekelabs.com\"\n\n# r = re.compile(r\"https?://www\\.[a-z0-9]+\\.com\")\n# l = re.findall(r,s)\n# print(l)\n\n# for value in l:\n# \ts = 
s.replace(value,\"{}\".format(value,value))\n# print(s)\n\n# \"the code link is http://www.github.com,\n# http://zekelabs.com\n\n# ?: : non caputuring group\n# ?P : named group\n\ns = \"12-02-2018\"\n# 32-13-2018\n# dd-mm-yyyy\nr = re.compile(r\"^(?P[0-9]{2})-(?P[0-9]{2})-(?P[0-9]{4})$\")\n# # # l = re.findall(r,s)\n# # # print(l)\n# m = re.search(r,s)\n# if m:\n# \tprint(m.group(\"year\"))\n# else:\n# \tprint(\"pattern not found\")\n# Search returns a object of call match if pattern found \n# else None \n\n'''\n\ns = \"(+91)7123456789\"\n# s1 = \"7123456789\"\n\n#l = re.findall(\"[A-Za-z]+[0-9]?\",\"Python\")\n#print(l)\n\n\nr = re.compile(r\"^(?:\\(\\+91\\))?([6-9]\\d{9})$\")\nm = re.search(r,s)\nif m:\n\tprint(m.group())\n\tprint(m.group(0))\n\tprint(m.group(1))\nelse:\n\tprint(\"Pattern not found\")\n\n# m = re.search(r,s)\n# if m : \n# \tprint(m.group())\n# else:\n# \tprint(\"pattern not found\")\n\n# s = \"Python1234$$$$\"\n# print(re.findall(\"[^A-Za-z]+\",s))\n\n# [a-zA-Z0-9] = > \\w\n# [^a-zA-Z0-9] => \\W\n\n# [0-9] => \\d\n# [^0-9] = > \\D\n\n# \\s => space \n# \\S => compliment of space \n\n# email address validation abc123@gmail.com\n# date validation \n# ip valid 0.0.0.0 \n# \t\t255.255.255.255\n\n\n","sub_path":"regexscripts/regex2.py","file_name":"regex2.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"494616888","text":"\"\"\"SDGym - Synthetic data Gym.\n\nSDGym is a framework to benchmark the performance of synthetic data generators for\ntabular data.\n\"\"\"\n\n__author__ = 'MIT Data To AI Lab'\n__copyright__ = 'Copyright (c) 2018, MIT Data To AI Lab'\n__email__ = 'dailabmit@gmail.com'\n__license__ = 'MIT'\n__version__ = '0.4.1.dev0'\n\nfrom sdgym import benchmark, synthesizers\nfrom sdgym.benchmark import run\nfrom sdgym.collect import collect_results\nfrom sdgym.datasets import load_dataset\nfrom sdgym.summary import make_summary_spreadsheet\n\n__all__ = [\n 'benchmark',\n 'synthesizers',\n 'run',\n 'load_dataset',\n 'collect_results',\n 'make_summary_spreadsheet'\n]\n","sub_path":"sdgym/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"101426837","text":"from django.conf.urls import url\nfrom metrotract import views\n# from registration.views import RegistrationView\n#\n# class MyRegistrationView(RegistrationView):\n# def get_success_url(self,request, user):\n# return 'metrotract/issue_list.html'\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^about$', views.about, name='about'),\n url(r'^cities$', views.city_list, name='city_list'),\n url(r'^issues$', views.issue_list, name='issue_list'),\n url(r'^categories$', views.category_list, name='category_list'),\n url(r'^add_issue$', views.add_issue, name='add_issue'),\n url(r'^add_city$', views.add_city, name='add_city'),\n url(r'^add_category$', views.add_category, name='add_category'),\n url(r'^restricted/', views.restricted, name='restricted'),\n url(r'^getting_started/', views.getting_started, name='getting_started'),\n url(r'^question/(?P[\\w\\-]+)/$', views.answer_question, name='answer_question'),\n #url(r'^accounts/register/$', MyRegistrationView.as_view(), name='registration_register'),\n 
]","sub_path":"metrotract/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"353543309","text":"'''\nSource : https://leetcode.com/problems/non-decreasing-array/\nAuthor : Yuan Wang\nDate : 2018-06-16\n\n/********************************************************************************** \n*Given an array with n integers, your task is to check if it could become non-decreasing by modifying at most 1 element.\n*\n*We define an array is non-decreasing if array[i] <= array[i + 1] holds for every i (1 <= i < n).\n8\n*Example 1:\n*Input: [4,2,3]\n*Output: True\n*Explanation: You could modify the first 4 to 1 to get a non-decreasing array.\n*\n*Example 2:\n*Input: [4,2,1]\n*Output: False\n*Explanation: You can't get a non-decreasing array by modify at most one element.\n**********************************************************************************/\n'''\n\ndef check_order(numbers):\n\tfor i in range(1,len(numbers)):\n\t\tif numbers[i] 0:\n m = m_list[0]\n return cls(m.pk, )\n return None\n\n def read(self):\n if self.object is None:\n _m = self.get_obj()\n self.object = {\n 'id': self.id,\n 'out_trade_no': _m.out_trade_no,\n 'trade_no': _m.trade_no,\n 'total_fee': _m.total_fee,\n 'trade_status': _m.trade_status\n }\n return self.object\n\n @property\n def out_trade_no(self):\n obj = self.read()\n out_trade_no = obj.get('out_trade_no')\n return out_trade_no\n\n @property\n def trade_no(self):\n obj = self.read()\n trade_no = obj.get('trade_no')\n return trade_no\n\n @property\n def total_fee(self):\n obj = self.read()\n total_fee = obj.get('total_fee')\n return total_fee\n\n @property\n def trade_status(self):\n obj = self.read()\n trade_status = obj.get('trade_status')\n return trade_status\n\n def is_success(self):\n return self.trade_status in ('TRADE_SUCCESS', 'TRADE_FINISHED')\n\n\nclass WxTradeLog(Base):\n def __init__(self, pk):\n super(WxTradeLog, self).__init__(pk)\n self.__object = None\n\n @classmethod\n def create(cls, out_trade_no, total_fee, result_code, transaction_id, trade_type, fee_type, appid='', mch_id=''):\n log_base = WxTradeLogModel(out_trade_no=out_trade_no, total_fee=total_fee,\n result_code=result_code, transaction_id=transaction_id,\n trade_type=trade_type, fee_type=fee_type,\n appid=appid, mch_id=mch_id)\n g.db_session.add(log_base)\n g.db_session.commit()\n return cls(pk=log_base.id)\n\n def get_obj(self):\n hdl = g.db_session.query(WxTradeLogModel).get(self.id)\n return hdl\n\n @classmethod\n def filter_transaction(cls, out_trade_no, trade_status='SUCCESS'):\n hdl = g.db_session.query(WxTradeLogModel).filter(WxTradeLogModel.result_code==trade_status).filter(WxTradeLogModel.out_trade_no==out_trade_no)\n return hdl\n\n @classmethod\n def transaction(cls, out_trade_no, trade_status='SUCCESS'):\n m_list = cls.filter_transaction(out_trade_no, trade_status=trade_status)\n if len(m_list) > 0:\n m = m_list[0]\n return cls(m.pk)\n return None\n\n def read(self):\n if self.__object is None:\n _m = self.get_obj()\n self.__object = {\n 'id': self.id,\n 'out_trade_no': _m.out_trade_no,\n 'total_fee': _m.total_fee,\n 'trade_status': _m.result_code,\n 'appid': _m.appid,\n 'mch_id': _m.mch_id\n }\n return self.__object\n\n @property\n def out_trade_no(self):\n obj = self.read()\n out_trade_no = obj.get('out_trade_no')\n return out_trade_no\n\n @property\n def total_fee(self):\n obj = self.read()\n total_fee = obj.get('total_fee')\n return total_fee\n\n 
@property\n    def trade_status(self):\n        obj = self.read()\n        trade_status = obj.get('trade_status')\n        return trade_status\n\n    @property\n    def appid(self):\n        obj = self.read()\n        appid = obj.get('appid')\n        return appid\n\n    @property\n    def is_success(self):\n        return self.trade_status == 'SUCCESS'\n","sub_path":"manage/trade/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":6316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"399469991","text":"#Given a string find if the characters are unique\n#CTCI 1.1\n\"\"\"\n- Read input string as ascii list\n- Exit if repeating values appear in ascii list\n- printable ASCII codes fall in the range 32 to 126\n\"\"\"\nascii_list = []\ninp_str = input(\"Enter the string:\")\nfor i in inp_str:\n    if ord(i) in ascii_list: #ord gives ascii value\n        print(\"String does not have unique characters\")\n        exit()\n    ascii_list.append(ord(i))\nprint(\"String has unique characters\")\nprint(ascii_list)\n\n\"\"\"\nsecond solution\n- read char into list.\n- if char present in list, exit\n\"\"\"\ninp_str=input(\"Enter String:\")\ntmp_list = []\nfor i in inp_str:\n    if i in tmp_list:\n        print(\"found duplicate\")\n        exit(0)\n    else:\n        tmp_list.append(i)\n","sub_path":"solutions/02_unique_char_string.py","file_name":"02_unique_char_string.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"77169887","text":"l1 = [1, 2, 3, 4]\nl2 = [5, 6, 7, 8]\n\nk = zip(l1, l2)\n\nkeys = [1, 2, 3, 4]\nvals = [\"asdf\", \"asdfafasdfa\", \"adfsadfaiojgt\", \"asdfsdfaa\"]\n\npairs = zip(keys, vals)  # avoid shadowing the built-ins list/dict\nd = dict(pairs)\nprint(d)\n\nS = 'spam'\nfor (offset, item) in enumerate(S):\n    print(item, 'appears at offset', offset)","sub_path":"src/Tutorials/Lists/zip_that_shit.py","file_name":"zip_that_shit.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"436249707","text":"from array import array\n\nsigned_int = array('i')  # create a signed integer array ('i' = signed int)\nsigned_int.append(-1)\nsigned_int.append(1)\n\nunsigned_int = array('I', [1, 3, 4])  # create an unsigned integer array ('I' = unsigned int)\nunsigned_int.append(2)\ntry:\n    unsigned_int.append(-1)\nexcept OverflowError as e:\n    print(e)\n    \nfloat_arr = array('f', [1.2, 3.45])  # create a float array, 4 bytes per element\nfloat_arr.append(6.7)\nfloat_arr.append(8)\n\ndouble_arr = array('d', [1, 2.3, 456])  # create a float array, 8 bytes per element\n\nprint(signed_int, unsigned_int, float_arr, double_arr)\n# the array's type code and the byte size of each element\nprint(signed_int.typecode, signed_int.itemsize)\nprint(list(signed_int))\n","sub_path":"src/ch5/array_demo.py","file_name":"array_demo.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"186190164","text":"# coding=utf-8\n__author__ = 'bITeMe'\nimport re\n\nstr1 = 'someone@gmail.com'\nstr2 = 'bill.gates@microsoft.com'\n\nreg = r'^[a-zA-Z][a-zA-Z0-9\\.]+@[a-zA-Z0-9]+\\.[a-zA-Z]+$'\n\nif re.match(reg, str1):\n    print('str1 ok')\nelse:\n    print('str1 failed')\n\nif re.match(reg, str2):\n    print('str2 ok')\nelse:\n    print('str2 failed')\n","sub_path":"regex/regex1.py","file_name":"regex1.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"509158161","text":"from helper import TestCase\nfrom ecc import Point\n\nclass PointTest(TestCase):\n\n    def test_ne(self):\n        a = Point(x=3, y=-7, a=5, b=7)\n        b = 
Point(x=18, y=77, a=5, b=7)\n        self.assertTrue(a != b)\n        self.assertFalse(a != a)\n\n    def test_on_curve(self):\n        with self.assertRaises(ValueError):\n            Point(x=-2, y=4, a=5, b=7)\n        # these should not raise an error\n        Point(x=3, y=-7, a=5, b=7)\n        Point(x=18, y=77, a=5, b=7)\n\n    def test_add0(self):\n        a = Point(x=None, y=None, a=5, b=7)\n        b = Point(x=2, y=5, a=5, b=7)\n        c = Point(x=2, y=-5, a=5, b=7)\n        self.assertEqual(a + b, b)\n        self.assertEqual(b + a, b)\n        self.assertEqual(b + c, a)\n\n    def test_add1(self):\n        a = Point(x=3, y=7, a=5, b=7)\n        b = Point(x=-1, y=-1, a=5, b=7)\n        self.assertEqual(a + b, Point(x=2, y=-5, a=5, b=7))\n\n    def test_add2(self):\n        a = Point(x=-1, y=1, a=5, b=7)\n        self.assertEqual(a + a, Point(x=18, y=-77, a=5, b=7))","sub_path":"test_Point.py","file_name":"test_Point.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"541586910","text":"import pandas as pd\nimport os\nimport joblib\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.model_selection import RandomizedSearchCV, train_test_split\n# from sklearn.linear_model import SGDClassifier\n# import xgboost as xgb\nfrom sklearn.metrics import accuracy_score, roc_auc_score, roc_curve, classification_report\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\n\n# Train model\n\nclass PropertyModel:\n    \n    def __init__(self):\n        self.seed = 2020\n        self.X_test = None\n        self.y_test = None\n        self.model_rf = None\n        self.label = None\n        self.features = None\n        self.dups_pct = None\n        self.final_model = None\n        self.features_col_names = None\n\n    def split_data_label_features(self, data):\n        self.dups_pct = data.isna().any(axis=1).mean() \n        data_no_dups = data.dropna()\n\n        self.features = data_no_dups[[x for x in data_no_dups.columns if data_no_dups[x].dtype!='object']]\n        self.label = pd.to_numeric(data_no_dups.final_decision=='A').astype(np.int8)\n\n    def fit_model_in_validation(self):\n        X_train, self.X_test, y_train, self.y_test = train_test_split(self.features, self.label, test_size=0.2, random_state=self.seed)\n\n        rf_acceptance = RandomForestClassifier(n_estimators=50)\n        pipeline_rf = Pipeline([('scaler', StandardScaler()),\n                                ('select_model', SelectFromModel(rf_acceptance, prefit=False)),\n                                ('model', RandomForestClassifier(random_state=self.seed))],\n                               verbose = False)\n\n        params_rf = {\n            'model__n_estimators': [100, 200, 500],\n            'model__min_samples_split': [2, 5, 10],\n            'model__min_samples_leaf': [1, 5, 10]\n        }\n\n        self.features_col_names = X_train.columns.tolist()\n        self.model_rf = RandomizedSearchCV(pipeline_rf, params_rf, cv=5, random_state=self.seed, n_iter=5, verbose=0, n_jobs=-1)\n        self.model_rf = self.model_rf.fit(X_train, y_train)\n\n    def print_model_metrics(self):\n        print('-------------------------\\n')\n        print('Best parameters from CV: {0}'.format(self.model_rf.best_params_))\n        print('-------------------------\\n')\n        print('Best accuracy score in CV: {0:.3f}'.format(self.model_rf.best_score_))\n        print('Best model accuracy score in test: {0:.3f}'.format(accuracy_score(self.y_test, self.model_rf.best_estimator_.predict(self.X_test))))\n        print('Best model ROC in test: {0:.3f}'.format(roc_auc_score(self.y_test, self.model_rf.best_estimator_.predict_proba(self.X_test)[:,1])))\n        print('-------------------------\\n')\n        print('Confusion matrix in test:\\n')\n        
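# cross-tabulation of actual vs. predicted labels, expressed as percentages: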
print(pd.crosstab(self.y_test, self.model_rf.best_estimator_.predict(self.X_test), normalize=True)*100)\n        print('Classification report in test: \\n')\n        print(classification_report(self.y_test, self.model_rf.best_estimator_.predict(self.X_test)))\n\n    def fit_model_whole_data(self):\n        self.final_model = self.model_rf.best_estimator_.fit(self.features, self.label)\n\n    def save_model(self, model_file_name='acceptance_model.sav'):\n        joblib.dump(self.final_model, '02_app/' + model_file_name)\n\n    def save_feature_col_names(self, features_colnames_path = 'var_names.pkl'):\n        with open('02_app/' + features_colnames_path, 'wb') as f:\n            pickle.dump(self.features_col_names, f)","sub_path":"03_scripts/02_model_fitting/fit_model_class.py","file_name":"fit_model_class.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"262761524","text":"# Polyline drawing problem\r\n\r\n###################################################\r\n# Student should enter code below\r\n\r\nimport SimpleGUICS2Pygame.simpleguics2pygame as simplegui\r\nimport math\r\n\r\npolyline = []\r\n\r\n\r\n# define mouseclick handler\r\ndef click(pos):\r\n    \r\n    polyline.append(pos)\r\n    print(polyline)\r\n    \r\n# button to clear canvas\r\ndef clear():\r\n    global polyline\r\n    polyline = []\r\n    print(polyline)\r\n\r\n# define draw\r\ndef draw(canvas):\r\n    if len(polyline) > 0:\r\n        canvas.draw_circle(polyline[0], 1, 1, \"Red\", \"Red\") \r\n    if len(polyline) > 1:  # draw_polyline needs at least two points\r\n        canvas.draw_polyline(polyline, 4, 'Red')\r\n    \r\n    \r\n# create frame and register handlers\r\nframe = simplegui.create_frame(\"Echo click\", 300, 200)\r\nframe.set_mouseclick_handler(click)\r\nframe.set_draw_handler(draw)\r\nframe.add_button(\"Clear\", clear)\r\n\r\n# start frame\r\nframe.start()\r\n\r\n","sub_path":"IntProg2_Ex_1_6.py","file_name":"IntProg2_Ex_1_6.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"349383286","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 18 10:03:59 2021\n\n@author: luke\n\"\"\"\n\n# Package ID: knb-lter-ntl.10001.3 Cataloging System:https://pasta.lternet.edu.\n# Data set title: Globally distributed lake surface water temperatures collected in situ and by satellites; 1985-2009.\n\n# \n# This program creates numbered Pandas dataframes named dt1,dt2,dt3...,\n# one for each data table in the dataset. It also provides some basic\n# summaries of their contents. NumPy and Pandas modules need to be installed\n# for the program to run. 
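#\n# Note: besides NumPy and Pandas, the script below also relies on xarray,\n# geopandas, cartopy, seaborn, SciPy and GDAL, and expects ERA5-Land NetCDF\n# files in a local directory (see the hard-coded paths further down).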
\n\n\n\n# =============================================================================\n# import\n# =============================================================================\n\n\n\nimport numpy as np\nimport pandas as pd \nimport os\nimport xarray as xr\nfrom scipy import stats as sts\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nimport geopandas as gpd\nfrom shapely.geometry import Polygon\nfrom shapely import wkt\nimport os\nimport gdal\nimport copy as cp\nfrom collections import OrderedDict\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Rectangle\nfrom matplotlib.patches import Patch\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature \ncmaps = OrderedDict()\n\n\n\n\n# =============================================================================\n# functions\n# =============================================================================\n\n\n\ndef rasterize(feature_name,lon_min,lon_max,lat_min,lat_max,resolution,filename):\n \"\"\"\n This function rasterizes a .shp file and saves it as a .tiff in the same directory\n Only for global extent\n input: feature_name: Fieldname of shapefile to be burned in raster\n resolution: horizontal resolution in degrees \n filename: input and output filename\n \"\"\"\n # define command\n command = 'gdal_rasterize -a '+ feature_name\\\n + ' -ot Float32 -of GTiff -te '+ str(lon_min)+' '+str(lat_min)+' '+str(lon_max)+' '+str(lat_max)+' -tr ' + str(resolution) +' '+ str(resolution)\\\n + ' -co COMPRESS=DEFLATE -co PREDICTOR=1 -co ZLEVEL=6 -l '+ filename\\\n + ' ' + filename+'.shp ' + filename +'.tiff'\n\n os.system(command) \n\ndef read_raster(filename):\n \"\"\"\n Function to read raster file\n input: file name of raster (ends in .tiff)\n output: 2D numpy array\n \"\"\"\n raster = gdal.Open(filename)\n myarray = np.array(raster.GetRasterBand(1).ReadAsArray())\n myarray = np.flipud(myarray)\n\n return myarray\n\ndef slope_field(xarr): \n \n # getting shapes\n m = np.prod(xarr.shape[1:]).squeeze()\n n = xarr.shape[0]\n \n # creating x and y variables for linear regression\n # x = xarr.time.to_pandas().index.to_julian_date().values[:, None]\n x = xarr.time.dt.year.values[:,None]\n y = xarr.to_masked_array().reshape(n, -1)\n \n # ############################ #\n # LINEAR REGRESSION DONE BELOW #\n xm = x.mean(0) # mean\n ym = y.mean(0) # mean\n ya = y - ym # anomaly\n xa = x - xm # anomaly\n \n # variance and covariances\n xss = (xa ** 2).sum(0) / (n - 1) # variance of x (with df as n-1)\n yss = (ya ** 2).sum(0) / (n - 1) # variance of y (with df as n-1)\n xys = (xa * ya).sum(0) / (n - 1) # covariance (with df as n-1)\n # slope and intercept\n slope = xys / xss\n intercept = ym - (slope * xm)\n # statistics about fit\n df = n - 2\n r = xys / (xss * yss)**0.5\n t = r * (df / ((1 - r) * (1 + r)))**0.5\n p = sts.distributions.t.sf(abs(t), df)\n \n # preparing outputs\n out = xarr[:2].mean('time')\n # first create variable for slope and adjust meta\n xarr_slope = out.copy()\n xarr_slope.name = '_slope'\n xarr_slope.attrs['units'] = 'K / year'\n xarr_slope.values = slope.reshape(xarr.shape[1:])\n # do the same for the p value\n xarr_p = out.copy()\n xarr_p.name = '_Pvalue'\n xarr_p.attrs['info'] = \"If p < 0.05 then the results from 'slope' are significant.\"\n xarr_p.values = p.reshape(xarr.shape[1:])\n # join these variables\n xarr_out = xarr_slope.to_dataset(name='slope')\n xarr_out['pval'] = xarr_p\n\n #return xarr_out\n return xarr_slope,xarr_p\n\n\ndef pixel(arr,\n lon,\n lat,\n out_arr = False):\n \n if 
out_arr == False:\n series = arr.sel(lon=lon,\n lat=lat,\n drop=True).squeeze().values.item()\n elif out_arr == True:\n series = arr.sel(lon=lon,\n lat=lat,\n drop=True).squeeze()\n \n return series\n\n\ndef df_indexer(slope_arr,\n series_arr,\n df,\n lon,\n lat):\n \n val = df.loc[(df['lat'] == lat) & (df['lon'] == lon),'arr1'].item()\n \n latx = slope_arr.where(slope_arr == val,drop=True).squeeze().lat.values.item()\n lonx = slope_arr.where(slope_arr == val,drop=True).squeeze().lon.values.item()\n \n series = series_arr.sel(lat=latx,\n lon=lonx,\n drop=True).squeeze()\n \n series = series.interpolate_na(dim='time')\n \n return series\n \n\ndef arr_to_df(arr1,\n arr2):\n \n \"\"\" Take two arrays (matching ERA5L and obs). For each significant obs trend\n in arr1, take lat + lon coords, find value for this coord in ERA5L and append \n arr1 value, arr2 value, lat and lon to dataframe.\n \n Parameters\n ----------\n arr1 : obs\n arr2 : ERA5L\n \n Returns\n ------- \n Pandas dataframe\n \"\"\"\n \n frame = {'arr1':[],'arr2':[],'lat':[],'lon':[]}\n df = pd.DataFrame(data=frame)\n vals = arr1.values.flatten()\n data = vals[~np.isnan(vals)]\n for d in data: \n d_coords = arr1.where(arr1==d,drop=True).squeeze()\n lat = round(d_coords.lat.values.item(),1)\n lon = round(d_coords.lon.values.item(),1)\n e = pixel(arr2,\n lon,\n lat,\n out_arr=False)\n df = df.append({'arr1':d,'arr2':e,'lat':lat,'lon':lon}, ignore_index=True)\n \n return df.dropna()\n\n\ndef ensembler(data):\n concat_dim = np.arange(len(data))\n aligned = xr.concat(data,dim=concat_dim)\n ens_mean = aligned.mean(dim='concat_dim')\n ens_std = aligned.std(dim='concat_dim')\n ens_max = aligned.max(dim='concat_dim')\n ens_min = aligned.min(dim='concat_dim')\n ens_roll = ens_mean.rolling(time=5, center=True).mean()\n dict_ens = {}\n dict_ens['mean'] = ens_mean\n dict_ens['std'] = ens_std\n dict_ens['max'] = ens_max\n dict_ens['min'] = ens_min\n dict_ens['roll'] = ens_roll\n return dict_ens\n\ndef plotter(time,\n ens_mean,\n ens_std,\n ens_max,\n ens_min,\n ens_roll,\n ax,\n lw_mean,\n lw_roll,\n col_mean,\n col_fill_a,\n col_fill_b,\n ub_alpha):\n \n ens_mean = ens_mean.values\n ens_std = ens_std.values\n ens_max = ens_max.values\n ens_min = ens_min.values\n ens_roll = ens_roll.values\n \n # plot mean line\n h = ax.plot(time, \n ens_mean,\n lw=lw_mean, \n color=col_mean, \n zorder=4)\n \n # plot mean line\n h = ax.plot(time, \n ens_roll,\n lw=lw_roll, \n color=col_fill_a, \n zorder=3)\n \n return h,ax\n\n\ndef tser_plotter(series_insitu,\n series_satellite,\n colors_insitu,\n colors_satellite,\n x,\n y,\n xmin,\n xmax,\n ymin,\n ymax,\n labels,\n xticks,\n xtick_labels,\n tick_font,\n title_font,\n axis_font,\n legend_font,\n legend_entrylen,\n legend_entrypad,\n legendcols,\n xlabel_xpos,\n xlabel_ypos,\n xlabel,\n ylabel_xpos,\n ylabel_ypos,\n ylabel,\n ub_alpha,\n letters):\n\n f, (ax1,ax2) = plt.subplots(2,1,figsize=(x,y))\n \n time = np.arange(1985,2010)\n \n for s,c in zip(series_insitu,colors_insitu):\n \n h,ax1 = plotter(time,\n s['mean'],\n s['std'],\n s['max'],\n s['min'],\n s['roll'],\n ax1,\n lw_mean,\n lw_roll,\n c['mean'],\n c['fill_a'],\n c['fill_b'],\n ub_alpha)\n \n for s,c in zip(series_satellite,colors_satellite):\n \n h,ax2 = plotter(time,\n s['mean'],\n s['std'],\n s['max'],\n s['min'],\n s['roll'],\n ax2,\n lw_mean,\n lw_roll,\n c['mean'],\n c['fill_a'],\n c['fill_b'],\n ub_alpha)\n \n count = 0\n for ax in (ax1,ax2): \n ax.set_xlim(xmin,xmax)\n ax.set_ylim(ymin,ymax)\n ax.xaxis.set_ticks(xticks)\n 
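        # shared styling for both panels: inward ticks, a light dashed grid and hidden top/right spines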
ax.tick_params(labelsize=tick_font,axis=\"x\",direction=\"in\", left=\"off\",labelleft=\"on\")\n ax.tick_params(labelsize=tick_font,axis=\"y\",direction=\"in\")\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.yaxis.grid(color='0.8', linestyle='dashed', linewidth=0.5)\n ax.xaxis.grid(color='0.8', linestyle='dashed', linewidth=0.5)\n ax.set_axisbelow(True)\n ax.set_title(letters[count],loc='left',fontsize=title_font,fontweight='bold')\n count += 1\n \n \n ax1.xaxis.set_ticklabels([])\n ax2.xaxis.set_ticklabels(xtick_labels)\n \n ax1.legend(handles,\n labels,\n bbox_to_anchor=(x0, y0, xlen, ylen), \n loc=3, #bbox: (x, y, width, height)\n ncol=3,\n fontsize=legend_font, \n mode=\"expand\", \n borderaxespad=0.,\\\n frameon=False, \n columnspacing=0.05, \n handlelength=legend_entrylen, \n handletextpad=legend_entrypad)\n \n # labels\n f.text(xlabel_xpos, xlabel_ypos, xlabel, ha='center', fontsize=axis_font)\n f.text(ylabel_xpos, ylabel_ypos, ylabel, va='center', rotation='vertical', fontsize=axis_font)\n \n# =============================================================================\n# f.savefig('gltc_tseries.png',bbox_inches='tight',dpi=200)\n# =============================================================================\n \n \ndef map_plotter(proj,\n extent,\n insitu_pts,\n satellite_pts,\n col_insitu,\n col_satellite,\n lab_insitu,\n lab_satellite):\n \n f = plt.figure(figsize=(10,5))\n proj = ccrs.Robinson()\n ax = plt.axes(projection=ccrs.PlateCarree())\n ax.set_extent(extent, crs=ccrs.PlateCarree())\n \n insitu_pts.plot(ax=ax,\n markersize=4,\n color=col_insitu['mean'],\n zorder=2,\n transform=ccrs.PlateCarree())\n satellite_pts.plot(ax=ax,\n markersize=4,\n color=col_satellite['mean'],\n zorder=2,\n transform=ccrs.PlateCarree())\n \n ax.add_feature(cfeature.LAND, \n zorder=1, \n edgecolor='black')\n \n legend_handles = [Line2D([0], [0],\n marker='o',\n color='w',\n label=lab_insitu,\n markerfacecolor=col_insitu['mean']),\n Line2D([0], [0],\n marker='o',\n color='w',\n label=lab_satellite,\n markerfacecolor=col_satellite['mean'])]\n \n ax.legend(handles=legend_handles,\n frameon=False)\n\n# =============================================================================\n# f.savefig('gltc_locations.png',bbox_inches='tight',dpi=200)\n# =============================================================================\n\ndef map_plotter_test(proj,\n extent,\n glrp_pts,\n col_glrp):\n \n f = plt.figure(figsize=(10,5))\n proj = ccrs.Robinson()\n ax = plt.axes(projection=ccrs.PlateCarree())\n ax.set_extent(extent, crs=ccrs.PlateCarree())\n \n glrp_pts.plot(ax=ax,\n markersize=4,\n color=col_glrp['mean'],\n zorder=2,\n transform=ccrs.PlateCarree())\n \n ax.add_feature(cfeature.LAND, \n zorder=1, \n edgecolor='black')\n \n legend_handles = [Line2D([0], [0],\n marker='o',\n color='w',\n label='GLRP locations',\n markerfacecolor=col_glrp['mean'])]\n \n ax.legend(handles=legend_handles,\n frameon=False)\n\n# =============================================================================\n# f.savefig('gltc_locations_og.png',bbox_inches='tight',dpi=200)\n# =============================================================================\n\ndef c(x):\n col = plt.cm.Greys(x)\n fig, ax = plt.subplots(figsize=(1,1))\n fig.set_facecolor(col)\n ax.axis(\"off\")\n plt.show()\n\n\n\n# =============================================================================\n# settings\n# =============================================================================\n\n\n\ntitle_font = 9\ntick_font = 
8\naxis_font = 9\nlegend_font = 8\n\n#========== LINE THICKNESS ==========#\n\n# mean line thickness\nlw_mean = 1.5\nlw_roll = 0.75\n\n#========== PLOT COLORS ==========#\n\ncol_pimean = 'blue' # picontrol mean color\ncol_pifill = '#a6bddb' # picontrol fill color\ncol_histmean = '0.3' # historical mean color\ncol_histfill = '0.75' # historical fill color\ncol_rcp26mean = 'darkgreen' # rcp26 mean color\ncol_rcp26fill = '#adebad' # rcp26 fill color\ncol_rcp60mean = 'darkgoldenrod' # rcp60 mean color\ncol_rcp60fill = '#ffec80' # rcp60 fill color\ncol_rcp85mean = 'darkred' # rcp85 mean color\ncol_rcp85fill = '#F08080' # rcp85 fill color\n\ncol_insitu = {}\ncol_satellite = {}\ncol_era = {}\n\ncol_insitu['mean'] = plt.cm.YlOrBr(0.9)\ncol_insitu['fill_a'] = plt.cm.YlOrBr(0.7)\ncol_insitu['fill_b'] = plt.cm.YlOrBr(0.4)\ncol_satellite['mean'] = plt.cm.Greens(0.9)\ncol_satellite['fill_a'] = plt.cm.Greens(0.7)\ncol_satellite['fill_b'] = plt.cm.Greens(0.4)\ncol_era['mean'] = plt.cm.Greys(0.9)\ncol_era['fill_a'] = plt.cm.Greys(0.7)\ncol_era['fill_b'] = plt.cm.Greys(0.4)\n\ncolors_insitu = [col_era,\n col_insitu]\n\ncolors_satellite = [col_era,\n col_satellite]\n\nub_alpha = 0.5\n\n#========== AXII ==========#\n\n# figsize = (x,y)\nx = 8\ny = 5\n\n# subplots_adjust\nhspace = 0.5\ntop = 0.9\n\nymin = -2 # ymin\nymax = 2 # ymax\nxmin = 1985 # xmin\nxmax = 2010 # xmax\n\n# x ticks/labels \nxticks = np.arange(1985,2015,5)\nxtick_labels = [None,1990,None,2000,None,2010]\n\n# x axis label\nxlabel = 'Years'\nxlabel_xpos = 0.5\nxlabel_ypos = 0.05\n\n# y axis label\nylabel = 'Lake temperature anomaly (°C)'\nylabel_xpos = 0.075\nylabel_ypos = 0.535\n\n# xaxis tick label sharing\naxis_share = False\n\n#========== LEGEND ==========#\n\n# labels\nlab_insitu = 'GLTC insitu'\nlab_satellite = 'GLTC satellite'\nlab_era = 'ERA5L'\n\n# bbox\nx0 = 0.5\ny0 = 1\nxlen = 0.5\nylen = 0.9\n\n# space between entries\nlegend_entrypad = 0.5\n\n# length per entry\nlegend_entrylen = 0.75\n\n# legend colors\nlegendcols = [col_era['mean'],\n col_insitu['mean'],\n col_satellite['mean']]\n\nhandles = [Line2D([0],[0],linestyle='-',lw=2,color=legendcols[0]),\\\n Line2D([0],[0],linestyle='-',lw=2,color=legendcols[1]),\\\n Line2D([0],[0],linestyle='-',lw=2,color=legendcols[2])]\n# labels\nlab_insitu = 'GLTC insitu'\nlab_satellite = 'GLTC satellite'\nlab_era = 'ERA5L'\n\nlabels= [lab_era,\n lab_insitu,\n lab_satellite]\n\nletters = ['a','b','c','d']\n\n# =============================================================================\n# retrieve data\n# =============================================================================\n\n\n\ninfile1 =\"https://pasta.lternet.edu/package/data/eml/knb-lter-ntl/10001/3/6e52deaa45c1695e7742c923ba04d16b\".strip() \ninfile1 = infile1.replace(\"https://\",\"http://\")\n \ndt1 =pd.read_csv(infile1,\n skiprows=1,\n sep=\",\",\n names=[\"recordID\", \n \"variable\", \n \"year\", \n \"siteID\", \n \"value\"])\n\n# Coerce the data into the types specified in the metadata \ndt1.recordID=dt1.recordID.astype('category') \ndt1.variable=dt1.variable.astype('category') \ndt1.year=pd.to_numeric(dt1.year,errors='coerce',downcast='integer') \ndt1.siteID=dt1.siteID.astype('category') \ndt1.value=pd.to_numeric(dt1.value,errors='coerce') \n \nprint(\"Here is a description of the data frame dt1 and number of lines\\n\")\nprint(dt1.info())\nprint(\"--------------------\\n\\n\") \nprint(\"Here is a summary of numerical variables in the data frame dt1\\n\")\nprint(dt1.describe())\nprint(\"--------------------\\n\\n\") \n 
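\n# dt1 arrives in long format (one row per recordID/variable/year/siteID/value);\n# further below, only the summer surface-temperature variables are kept and\n# the records are split by year.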
\nprint(\"The analyses below are basic descriptions of the variables. After testing, they should be replaced.\\n\") \n\nprint(dt1.recordID.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt1.variable.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt1.year.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt1.siteID.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt1.value.describe()) \nprint(\"--------------------\\n\\n\")\n \n \ninfile2 =\"https://pasta.lternet.edu/package/data/eml/knb-lter-ntl/10001/3/6167b9938e8dc99e9ee75251c70776a9\".strip() \ninfile2 = infile2.replace(\"https://\",\"http://\")\n \ndt2 =pd.read_csv(infile2, \n skiprows=1,\n sep=\",\" ,\n quotechar='\"' ,\n names=[\"siteID\", \n \"Lake_name\", \n \"Other_names\", \n \"lake_or_reservoir\", \n \"location\", \n \"region\", \n \"latitude\", \n \"longitude\", \n \"geospatial_accuracy_km\", \n \"elevation_m\", \n \"mean_depth_m\", \n \"max_depth_m\", \n \"surface_area_km2\", \n \"volume_km3\", \n \"source\", \n \"sampling_depth\", \n \"sampling_time_of_day\", \n \"time_period\", \n \"contributor\"], \n encoding = \"unicode_escape\")\n\n# Coerce the data into the types specified in the metadata \ndt2.siteID=dt2.siteID.astype('category') \ndt2.Lake_name=dt2.Lake_name.astype('category') \ndt2.Other_names=dt2.Other_names.astype('category') \ndt2.lake_or_reservoir=dt2.lake_or_reservoir.astype('category') \ndt2.location=dt2.location.astype('category') \ndt2.region=dt2.region.astype('category') \ndt2.latitude=pd.to_numeric(dt2.latitude,errors='coerce') \ndt2.longitude=pd.to_numeric(dt2.longitude,errors='coerce') \ndt2.geospatial_accuracy_km=pd.to_numeric(dt2.geospatial_accuracy_km,errors='coerce') \ndt2.elevation_m=pd.to_numeric(dt2.elevation_m,errors='coerce') \ndt2.mean_depth_m=pd.to_numeric(dt2.mean_depth_m,errors='coerce') \ndt2.max_depth_m=pd.to_numeric(dt2.max_depth_m,errors='coerce') \ndt2.surface_area_km2=pd.to_numeric(dt2.surface_area_km2,errors='coerce') \ndt2.volume_km3=pd.to_numeric(dt2.volume_km3,errors='coerce') \ndt2.source=dt2.source.astype('category') \ndt2.sampling_depth=dt2.sampling_depth.astype('category') \ndt2.sampling_time_of_day=dt2.sampling_time_of_day.astype('category') \ndt2.time_period=dt2.time_period.astype('category') \ndt2.contributor=dt2.contributor.astype('category') \n \nprint(\"Here is a description of the data frame dt2 and number of lines\\n\")\nprint(dt2.info())\nprint(\"--------------------\\n\\n\") \nprint(\"Here is a summary of numerical variables in the data frame dt2\\n\")\nprint(dt2.describe())\nprint(\"--------------------\\n\\n\") \n \nprint(\"The analyses below are basic descriptions of the variables. 
After testing, they should be replaced.\\n\") \n\nprint(dt2.siteID.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.Lake_name.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.Other_names.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.lake_or_reservoir.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.location.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.region.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.latitude.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.longitude.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.geospatial_accuracy_km.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.elevation_m.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.mean_depth_m.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.max_depth_m.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.surface_area_km2.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.volume_km3.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.source.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.sampling_depth.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.sampling_time_of_day.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.time_period.describe()) \nprint(\"--------------------\\n\\n\")\n \nprint(dt2.contributor.describe()) \nprint(\"--------------------\\n\\n\")\n\n\n\n# =============================================================================\n# dataframes processed by Luke\n# =============================================================================\n \n\n \nstring_1 = \"Lake_Temp_Summer_InSitu\"\nstring_2 = \"Lake_Temp_Summer_Satellite\"\n\ndata_insitu = dt1.loc[dt1['variable'] == string_1]\ndata_satellite = dt1.loc[dt1['variable'] == string_2]\n\nmetadata = dt2[['siteID','latitude','longitude','surface_area_km2']]\n\ndata_insitu = pd.merge(data_insitu,metadata,on=\"siteID\").drop(columns=['siteID','recordID','variable'])\ndata_satellite = pd.merge(data_satellite,metadata,on=\"siteID\").drop(columns=['siteID','recordID','variable'])\n\ndict_insitu = {}\ndict_satellite = {}\n\nyears = sorted(data_insitu['year'].unique())\n\nfor i in years:\n dict_insitu[str(i)] = data_insitu.loc[data_insitu['year'] == i]\n dict_satellite[str(i)] = data_satellite.loc[data_satellite['year'] == i]\n\n\n# reformulated target grid\nres = 0.1\nlons = np.arange(-180,180+res,res)\nlats = np.arange(-90,90.1+res,res)\n\n\n\n# =============================================================================\n# data conversion by Inne \n# =============================================================================\n\n\n\n# intialize empty numpy array \nvalues_insitu = np.empty((len(years),len(lats)-1,len(lons)-1))\nvalues_satellite = np.empty((len(years),len(lats)-1,len(lons)-1))\n\n# loop over years\nfor i,year in enumerate(years): \n\n print(year)\n # select dataframe of certain year\n data_year_insitu = dict_insitu[str(year)] \n data_year_satellite = dict_satellite[str(year)] \n\n # turn pandas dataframes in geopandas with lat and lon as geometry\n gdf_insitu = gpd.GeoDataFrame(data_year_insitu, \n geometry=gpd.points_from_xy(data_year_insitu.longitude, \n data_year_insitu.latitude), \n crs=\"EPSG:4326\")\n gdf_satellite = gpd.GeoDataFrame(data_year_satellite, \n geometry=gpd.points_from_xy(data_year_satellite.longitude, \n data_year_satellite.latitude), \n 
crs=\"EPSG:4326\")\n\n fn_insitu='insitu_points_'+str(year)\n fn_satellite='satellite_points_'+str(year)\n\n # save as shapefile to convert to raster\n gdf_insitu.to_file(fn_insitu+'.shp')\n gdf_satellite.to_file(fn_satellite+'.shp')\n\n # rasterize grid polygon to tiff file and read in as numpy array\n rasterize('value',lons[0],lons[-1],lats[0],lats[-1], res, fn_insitu)\n rasterize('value',lons[0],lons[-1],lats[0],lats[-1], res, fn_satellite)\n\n # read rasterized values into numpy array \n year_values_insitu = read_raster(fn_insitu+'.tiff')\n year_values_satellite = read_raster(fn_satellite+'.tiff')\n\n # clean up\n os.remove(fn_insitu+'.shp')\n os.remove(fn_insitu+'.cpg')\n os.remove(fn_insitu+'.prj')\n os.remove(fn_insitu+'.shx')\n os.remove(fn_insitu+'.dbf')\n os.remove(fn_insitu+'.tiff')\n os.remove(fn_satellite+'.shp')\n os.remove(fn_satellite+'.cpg')\n os.remove(fn_satellite+'.prj')\n os.remove(fn_satellite+'.shx')\n os.remove(fn_satellite+'.dbf')\n os.remove(fn_satellite+'.tiff')\n\n # save values in numpy array\n values_insitu[i,:,:] = year_values_insitu\n # save values in numpy array\n values_satellite[i,:,:] = year_values_satellite\n\nvalues_insitu[values_insitu == 0] = np.nan # there are no 0 temperatures in the dataset\nvalues_satellite[values_satellite == 0] = np.nan # there are no 0 temperatures in the dataset\n\n\nlongitudes = np.arange(0,360,res)\nlatitudes = np.arange(-90,90+res,res)\n\ntime = pd.date_range(start='1985-01-01',end='2009-01-01',freq='YS')\n\n# data arrays of insitu and satellite obs\nda_insitu = xr.DataArray(values_insitu, coords=[time,latitudes,longitudes], dims=[\"time\", \"lat\", \"lon\"])\nda_satellite = xr.DataArray(values_satellite, coords=[time,latitudes,longitudes], dims=[\"time\", \"lat\", \"lon\"])\n\n\n\n# =============================================================================\n# slope\n# =============================================================================\n\n\n\n# slope calculations; mask for significant obs trends only\nslope_insitu,pval_insitu = slope_field(da_insitu)\nslope_satellite,pval_satellite = slope_field(da_satellite)\n\n# =============================================================================\n# slope_insitu_signif = slope_insitu.where(pval_insitu<0.05)\n# slope_satellite_signif = slope_satellite.where(pval_satellite<0.05)\n# =============================================================================\n\n\n# insitu obs slope\nslope_insitu_jas = slope_insitu.where(slope_insitu.lat >= 23.5,drop=True).squeeze()\nslope_insitu_jas_sh = slope_insitu.sel(lat=slice(-23.45,-0.05))\n\nslope_insitu_jfm = slope_insitu.where(slope_insitu.lat <= -23.5,drop=True).squeeze()\nslope_insitu_jfm_nh = slope_insitu.sel(lat=slice(0.05,23.45))\n\n# satellite obs slope\nslope_satellite_jas = slope_satellite.where(slope_satellite.lat >= 23.5,drop=True).squeeze()\nslope_satellite_jas_sh = slope_satellite.sel(lat=slice(-23.45,-0.05))\n\nslope_satellite_jfm = slope_satellite.where(slope_satellite.lat <= -23.5,drop=True).squeeze()\nslope_satellite_jfm_nh = slope_satellite.sel(lat=slice(0.05,23.45))\n\n\n# reanalysis read in\nos.chdir(\"/home/luke/documents/data/gltc/knb-lter-ntl.10001.3/final/\")\n\nera5l_jas_file = \"era5-land_lakes_lmlt_JAS_1985_2009.nc\"\nera5l_jas_sh_file = \"era5-land_lakes_lmlt_JAS_sh_1985_2009.nc\"\n\nera5l_jfm_file = \"era5-land_lakes_lmlt_JFM_1985_2009.nc\"\nera5l_jfm_nh_file = \"era5-land_lakes_lmlt_JFM_nh_1985_2009.nc\"\n\nda_era5l_jas = 
xr.open_dataset(era5l_jas_file,decode_times=False).lmlt\nda_era5l_jas['time'] = time\nda_era5l_jas = da_era5l_jas.rename({'longitude':'lon',\n                                    'latitude':'lat'})\n\nda_era5l_jas_sh = xr.open_dataset(era5l_jas_sh_file,decode_times=False).lmlt\nda_era5l_jas_sh['time'] = time\nda_era5l_jas_sh = da_era5l_jas_sh.rename({'longitude':'lon',\n                                          'latitude':'lat'})\n\nda_era5l_jfm = xr.open_dataset(era5l_jfm_file,decode_times=False).lmlt\nda_era5l_jfm['time'] = time\nda_era5l_jfm = da_era5l_jfm.rename({'longitude':'lon',\n                                    'latitude':'lat'})\n\nda_era5l_jfm_nh = xr.open_dataset(era5l_jfm_nh_file,decode_times=False).lmlt\nda_era5l_jfm_nh['time'] = time\nda_era5l_jfm_nh = da_era5l_jfm_nh.rename({'longitude':'lon',\n                                          'latitude':'lat'})\n\n\n# slope calculations\nslope_era5l_jas, _ = slope_field(da_era5l_jas)\nslope_era5l_jas_sh, _ = slope_field(da_era5l_jas_sh)\n\nslope_era5l_jfm, _ = slope_field(da_era5l_jfm)\nslope_era5l_jfm_nh, _ = slope_field(da_era5l_jfm_nh)\n\n# dataframes for insitu\ninsitu_jas = arr_to_df(slope_insitu_jas,\n                       slope_era5l_jas)\ninsitu_jas_sh = arr_to_df(slope_insitu_jas_sh,\n                          slope_era5l_jas_sh)\ninsitu_jfm = arr_to_df(slope_insitu_jfm,\n                       slope_era5l_jfm)\ninsitu_jfm_nh = arr_to_df(slope_insitu_jfm_nh,\n                          slope_era5l_jfm_nh)\n\n# dataframes for satellite\nsatellite_jas = arr_to_df(slope_satellite_jas,\n                          slope_era5l_jas)\nsatellite_jas_sh = arr_to_df(slope_satellite_jas_sh,\n                             slope_era5l_jas_sh)\nsatellite_jfm = arr_to_df(slope_satellite_jfm,\n                          slope_era5l_jfm)\nsatellite_jfm_nh = arr_to_df(slope_satellite_jfm_nh,\n                             slope_era5l_jfm_nh)\n\nframes_insitu = [insitu_jas,\n                 insitu_jas_sh,\n                 insitu_jfm,\n                 insitu_jfm_nh]\n\nframes_satellite = [satellite_jas,\n                    satellite_jas_sh,\n                    satellite_jfm,\n                    satellite_jfm_nh]\n\n# final dataframes with all obs-era pairs for significant obs trends\ngltc_insitu = pd.concat(frames_insitu)\ngltc_satellite = pd.concat(frames_satellite)\n\n\n\n# =============================================================================\n# slope scatterplot\n# =============================================================================\n\n\n\n# adding columns for source and recombining for hue\ninsitu_cp = cp.deepcopy(insitu_jas)\nsatellite_cp = cp.deepcopy(satellite_jas)\ninsitu_cp['source'] = 'insitu'\nsatellite_cp['source'] = 'satellite'\ngltc_df = pd.concat([insitu_cp,satellite_cp])\n\nslope, intercept, r_value, p_value, std_err = sts.linregress(gltc_df['arr2'],\n                                                             gltc_df['arr1'])\nr_sq = r_value**2\nlabel = r'$R^2:{0:.2f}$'.format(r_sq)\n\nax = sb.lmplot(data=gltc_df,\n               x='arr2',\n               y='arr1',\n               line_kws={'label': label},\n               legend=True)\nax.set(xlabel = \"ERA5L slope\",\n       ylabel = \"GLTC slope\")\n\n\n\n# =============================================================================\n# time series plots\n# =============================================================================\n\n\n\n# only take series from era5_jas (no other latitude ranges contribute to significant slopes in obs)\nera_insitu_series_list = []\nera_satellite_series_list = []\ninsitu_series_list = []\nsatellite_series_list = []\n\n# get series' for pixels in insitu data\nfor lon,lat in zip(gltc_insitu['lon'].values,gltc_insitu['lat'].values):\n    \n    era_series = pixel(da_era5l_jas,\n                       lon,\n                       lat,\n                       out_arr=True)\n    \n    era_series = era_series - 273.15\n    era_series = era_series - era_series.mean(dim='time')\n    \n    insitu_series = df_indexer(slope_insitu_jas,\n                               da_insitu,\n                               insitu_jas,\n                               lon,\n                               lat)\n    insitu_series = insitu_series - insitu_series.mean(dim='time')\n    \n    era_series = 
era_series.where(era_series.time == insitu_series.time)\n    \n    era_insitu_series_list.append(era_series)\n    insitu_series_list.append(insitu_series)\n    \n# series' for pixels in satellite jas data\nfor lon,lat in zip(satellite_jas['lon'].values,satellite_jas['lat'].values):\n    \n    era_series = pixel(da_era5l_jas,\n                       lon,\n                       lat,\n                       out_arr=True)\n    \n    era_series = era_series - 273.15\n    era_series = era_series - era_series.mean(dim='time')\n    \n    satellite_series = df_indexer(slope_satellite_jas,\n                                  da_satellite,\n                                  satellite_jas,\n                                  lon,\n                                  lat)\n    satellite_series = satellite_series - satellite_series.mean(dim='time')\n    \n    era_series = era_series.where(era_series.time == satellite_series.time)\n    \n    era_satellite_series_list.append(era_series)\n    satellite_series_list.append(satellite_series)\n    \n\ndict_era_insitu = ensembler(era_insitu_series_list)\ndict_era_satellite = ensembler(era_satellite_series_list)\ndict_insitu = ensembler(insitu_series_list)\ndict_satellite = ensembler(satellite_series_list)\n\n\nseries_insitu = [dict_era_insitu,\n                 dict_insitu]\n\nseries_satellite = [dict_era_satellite,\n                    dict_satellite]\n\ntser_plotter(series_insitu,\n             series_satellite,\n             colors_insitu,\n             colors_satellite,\n             x,\n             y,\n             xmin,\n             xmax,\n             ymin,\n             ymax,\n             labels,\n             xticks,\n             xtick_labels,\n             tick_font,\n             title_font,\n             axis_font,\n             legend_font,\n             legend_entrylen,\n             legend_entrypad,\n             legendcols,\n             xlabel_xpos,\n             xlabel_ypos,\n             xlabel,\n             ylabel_xpos,\n             ylabel_ypos,\n             ylabel,\n             ub_alpha,\n             letters)\n\n\n\n# =============================================================================\n# map of observations\n# =============================================================================\n\n\nos.chdir('/home/luke/documents/data/gltc/knb-lter-ntl.10001.3/final/')\nproj = ccrs.Robinson()\nextent = [-180, 180, -65, 90]\n\n# dataframes for insitu\ninsitu_jas = arr_to_df(slope_insitu_jas,\n                       slope_era5l_jas)\ninsitu_jas_sh = arr_to_df(slope_insitu_jas_sh,\n                          slope_era5l_jas_sh)\ninsitu_jfm = arr_to_df(slope_insitu_jfm,\n                       slope_era5l_jfm)\ninsitu_jfm_nh = arr_to_df(slope_insitu_jfm_nh,\n                          slope_era5l_jfm_nh)\n\n# dataframes for satellite\nsatellite_jas = arr_to_df(slope_satellite_jas,\n                          slope_era5l_jas)\nsatellite_jas_sh = arr_to_df(slope_satellite_jas_sh,\n                             slope_era5l_jas_sh)\nsatellite_jfm = arr_to_df(slope_satellite_jfm,\n                          slope_era5l_jfm)\nsatellite_jfm_nh = arr_to_df(slope_satellite_jfm_nh,\n                             slope_era5l_jfm_nh)\n\ninsitu_jas['lon'] = insitu_jas.apply(lambda row: row.lon - 180,axis=1)\ninsitu_jas_sh['lon'] = insitu_jas_sh.apply(lambda row: row.lon - 180,axis=1)\ninsitu_jfm['lon'] = 
insitu_jfm.apply(lambda row: row.lon - 180,axis=1)\ninsitu_jfm_nh['lon'] = insitu_jfm_nh.apply(lambda row: row.lon - 180,axis=1)\n\ninsitu = pd.concat([insitu_jas,\n insitu_jas_sh,\n insitu_jfm,\n insitu_jfm_nh])\n\ninsitu_pts = gpd.GeoDataFrame(insitu,\n geometry=gpd.points_from_xy(insitu.lon, \n insitu.lat),\n crs=\"EPSG:4326\")\n\ninsitu_pts = insitu_pts.geometry\n\nsatellite_jas['lon'] = satellite_jas.apply(lambda row: row.lon - 180,axis=1)\nsatellite_jas_sh['lon'] = satellite_jas_sh.apply(lambda row: row.lon - 180,axis=1)\nsatellite_jfm['lon'] = satellite_jfm.apply(lambda row: row.lon - 180,axis=1)\nsatellite_jfm_nh['lon'] = satellite_jfm_nh.apply(lambda row: row.lon - 180,axis=1)\n\nsatellite = pd.concat([satellite_jas,\n satellite_jas_sh,\n satellite_jfm,\n satellite_jfm_nh])\n\nsatellite_pts = gpd.GeoDataFrame(satellite,\n geometry=gpd.points_from_xy(satellite.lon, \n satellite.lat),\n crs=\"EPSG:4326\")\n\nsatellite_pts = satellite_pts.geometry\n\nmap_plotter(proj,\n extent,\n insitu_pts,\n satellite_pts,\n col_insitu,\n col_satellite,\n lab_insitu,\n lab_satellite)\n\n# testing with data at original coordinates (after some subsampling)\n# =============================================================================\n# proj = ccrs.Robinson()\n# extent = [-180, 180, 20, 90]\n# data = pd.concat([data_insitu,data_satellite])\n# og_pts = gpd.GeoDataFrame(data,\n# geometry=gpd.points_from_xy(data.longitude, \n# data.latitude),\n# crs=\"EPSG:4326\")\n# \n# og_pts = og_pts.geometry\n# \n# og_pts = gdf_insitu.geometry\n# map_plotter_test(proj,\n# extent,\n# og_pts,\n# col_insitu)\n# \n# # test locations of data once in dataframe of slopes\n# insitu_jas = arr_to_df(slope_insitu_jas,\n# slope_era5l_jas)\n# satellite_jas = arr_to_df(slope_satellite_jas,\n# slope_era5l_jas)\n# shapefile_data = pd.concat([insitu_jas,satellite_jas])\n# \n# shapefile_data['lon'] = shapefile_data.apply(lambda row: row.lon - 180,axis=1)\n# \n# \n# shapefile_gdf = gpd.GeoDataFrame(shapefile_data,\n# geometry=gpd.points_from_xy(shapefile_data.lon, \n# shapefile_data.lat),\n# crs=\"EPSG:4326\")\n# sfile='shapefile_test_3'\n# shapefile_gdf.to_file(sfile+'.shp')\n# =============================================================================\n","sub_path":"python/eval/gltc.py","file_name":"gltc.py","file_ext":"py","file_size_in_byte":40610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"302691225","text":"from typing import List\n\n\ndef compute_min_refills(distance: int, tank: int, stops: List[int]) -> int:\n \"\"\"Calculates minimum number of stops required.\n\n Args:\n distance (int): total trip distance\n tank (int): max fuel range of car\n stops (List[int]): total trip distance of each stop on route\n\n Returns:\n int: minimum number refills\n \"\"\"\n\n num_refills = 0\n current_refill = 0\n n_stops = len(stops)\n stops = [0, *stops, distance]\n while current_refill <= n_stops:\n last_refill = current_refill\n while (\n current_refill <= n_stops\n and stops[current_refill + 1] - stops[last_refill] <= tank\n ):\n current_refill += 1\n if current_refill == last_refill:\n return -1\n if current_refill <= n_stops:\n num_refills += 1\n return num_refills\n\n\nif __name__ == \"__main__\":\n d = int(input())\n m = int(input())\n n = int(input()) # not need for python implementation\n stops = [x for x in map(int, input().split())]\n # print(f\"d: {d}\")\n # print(f\"d: {m}\")\n # print(f\"stops: {stops}\")\n print(compute_min_refills(d, m, 
stops))\n","sub_path":"specialization-data-structures-algorithms/algorithmic-toolbox/week3/solutions/3_car_fueling/python/car_fueling.py","file_name":"car_fueling.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"117787945","text":"\"\"\"\nThis module contains functionality for navigation/browsing through text without selecting.\n\"\"\"\nfrom . import commands\nfrom logging import info\nfrom functools import partial\n\ndef movepage(document, backward=False):\n width, height = document.ui.viewport_size\n offset = document.ui.viewport_offset\n if backward:\n new_offset = move_n_wrapped_lines_up(document.text, width, offset, height)\n else:\n new_offset = move_n_wrapped_lines_down(document.text, width, offset, height)\n info('old: {}, new: {}'.format(offset, new_offset))\n document.ui.viewport_offset = new_offset\ncommands.pagedown = movepage\ncommands.pageup = partial(movepage, backward=True)\n\n\ndef center_around_selection(document):\n width, height = document.ui.viewport_size\n document.ui.viewport_offset = move_n_wrapped_lines_up(document.text, width,\n document.selection[0][0],\n int(height / 2))\n\n\ndef move_n_wrapped_lines_up(text, max_line_width, start, n):\n \"\"\"Return position that is n lines above start.\"\"\"\n position = text.rfind('\\n', 0, start)\n if position <= 0:\n return 0\n while 1:\n previousline = text.rfind('\\n', 0, position - 1)\n if previousline <= 0:\n return 0\n n -= int((position - previousline) / max_line_width) + 1\n if n <= 0:\n return position + 1\n position = previousline\n\n\ndef move_n_wrapped_lines_down(text, max_line_width, start, n):\n \"\"\"Return position that is n lines below start.\"\"\"\n position = text.find('\\n', start)\n l = len(text) - 1\n if position == -1 or position == l:\n return l\n while 1:\n eol = text.find('\\n', position)\n if eol == -1 or eol == l:\n return l\n nextline = eol + 1\n n -= int((nextline - position) / max_line_width) + 1\n if n <= 0:\n return position + 1\n position = nextline\n\n","sub_path":"fate/navigation.py","file_name":"navigation.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"46731227","text":"# <>\n# Copyright 2022, Lawrence Livermore National Security, LLC.\n# See the top-level COPYRIGHT file for details.\n# \n# SPDX-License-Identifier: BSD-3-Clause\n# <>\n\nfrom . import enums as enumsModule\nfrom LUPY import ancestry as ancestryModule\nfrom pqu import PQU as PQUModule\n\nclass Table(ancestryModule.AncestryIO):\n\n moniker = 'table'\n\n def __init__(self, columns=None, data=None, storageOrder=enumsModule.StorageOrder.rowMajor):\n\n ancestryModule.AncestryIO.__init__(self)\n\n storageOrder = enumsModule.StorageOrder.checkEnumOrString(storageOrder)\n if storageOrder != enumsModule.StorageOrder.rowMajor:\n raise ValueError('Currently, only supported value for storageOrder is \"%s\", got \"%s\".' 
% (enumsModule.StorageOrder.rowMajor, storageOrder))\n self.__storageOrder = storageOrder\n\n self.columns = columns or []\n\n self.data = data or []\n if not all( [len(d)==self.nColumns for d in self.data] ):\n raise ValueError(\"Data is the wrong shape for a table with %i columns!\" % self.nColumns)\n\n @property\n def storageOrder(self):\n \"\"\"Returns to value of storageOrder.\"\"\"\n\n return self.__storageOrder\n\n @property\n def nColumns(self): return len(self.columns)\n\n @property\n def nRows(self): return len(self.data)\n\n def __len__( self ):\n return self.nRows\n\n def __getitem__( self, indices ):\n if type(indices) is int: return self.data[ indices ]\n if len(indices)==2:\n i,j = indices\n if type(i) is slice:\n return [d[j] for d in self.data[i]]\n return self.data[i][j]\n raise IndexError(\"invalid index\")\n\n def addRow( self, dataRow ):\n if not len(dataRow) == self.nColumns:\n raise ValueError(\"New row has %i columns, should have %i!\" % (len(dataRow),self.nColumns))\n self.data.append( dataRow )\n\n def addColumn( self, columnHeader, index=None ):\n \"\"\" add another column, either at 'index' or at the end of the table \"\"\"\n if not isinstance(columnHeader, ColumnHeader):\n raise TypeError(\"addColumn requires a ColumnHeader instance but got %s\" % type(columnHeader))\n if index:\n self.columns.insert(index, columnHeader)\n [row.insert(index, 0) for row in self.data]\n else:\n self.columns.append( columnHeader )\n [row.append(0) for row in self.data]\n for idx, col in enumerate(self.columns): col.index = idx\n\n def convertUnits( self, unitMap ):\n \"\"\"\n unitMap is a dictionary of the form { 'eV' : 'MeV', 'b' : 'mb' }.\n Converts all columns whose units appear as keys in unitMap\n \"\"\"\n\n for idx, column in enumerate(self.columns):\n if column.unit in unitMap:\n factor = PQUModule.PQU(1, column.unit).getValueAs(unitMap[column.unit])\n column.unit = unitMap[column.unit]\n for row in self.data:\n row[idx] *= factor\n\n def getColumn( self, columnNameOrIndex, unit=None ):\n \"\"\" get data from one column, identified by the column 'name' attribute.\n Convert results to desired unit if specified \"\"\"\n if isinstance(columnNameOrIndex, int):\n index = columnNameOrIndex\n column = self.columns[index]\n else:\n column = [a for a in self.columns if a.name==columnNameOrIndex]\n if not column: return None\n if len(column) > 1: raise ValueError(\"Column named '%s' is not unique!\" % columnNameOrIndex)\n column = column[0]\n index = self.columns.index( column )\n if unit:\n cf = PQUModule.PQU(1, column.unit).convertToUnit( unit ).getValue()\n return [cf * v for v in self[:,index]]\n return self[:,index]\n\n def removeColumn( self, columnName ):\n \"\"\" remove one column from the table \"\"\"\n column = [a for a in self.columns if a.name==columnName]\n if not column: raise ValueError(\"column '%s' isn't present in the table!\" % columnName)\n if len(column) > 1: raise Exception(\"Column named '%s' is not unique!\" % columnName)\n index = self.columns.index( column[0] )\n self.columns.pop( index )\n [row.pop(index) for row in self.data]\n for idx, col in enumerate(self.columns): col.index = idx\n\n def toStringList(self, indent='',**kwargs):\n addHeader = kwargs.get( 'addHeader', True )\n addHeaderUnit = kwargs.get( 'addHeaderUnit', False )\n outline = kwargs.get( 'outline', False )\n columnWidths = [0] * self.nColumns\n xml = []\n for col in range(self.nColumns):\n columnDat = [row[col] for row in self.data if not isinstance(row[col], Blank)]\n asStrings = list( 
map( PQUModule.toShortestString, columnDat ) )\n            columnWidths[col] = max( list( map( len, asStrings ) ) )\n\n        if addHeader:\n            \"\"\" put column labels at the top of the table \"\"\"\n            names = [col.name for col in self.columns]\n            lengths = [len(name) for name in names]\n            if any( [' width' in name for name in names] ):\n                # special treatment for RML widths: split up onto two lines\n                nameL = [[],[]]\n                for name in names:\n                    if ' width' in name: l,r = name.split(' width'); r = ' width'+r+' '\n                    else: l,r = name, ''\n                    nameL[0].append(l); nameL[1].append(r)\n                names = nameL\n                lengths = [len(name) for name in names[0]]\n            else: names = [names]\n            columnWidths = [max(columnWidths[i],lengths[i]) for i in range(self.nColumns)]\n\n        if addHeaderUnit:\n            \"\"\" put column unit at the top of the table \"\"\"\n            unit = [str(col.unit) for col in self.columns]\n            lengths = [len(u) for u in unit]\n            columnWidths = [max(columnWidths[i],lengths[i]) for i in range(self.nColumns)]\n\n        template = ['%s' % (indent + '  ')] + ['%%%is' % l for l in columnWidths]\n\n        # emit the header rows once the final column widths are known\n        if addHeader:\n            xml += [(' '.join(template) % tuple(nameList)).rstrip() for nameList in names]\n        if addHeaderUnit:\n            xml.append((' '.join(template) % tuple(unit)).rstrip())\n\n        def toString(val):\n            if isinstance(val, Blank): return str(val)\n            return PQUModule.toShortestString(val)\n\n        if outline:\n            xml += [(' '.join(template) % tuple( map(toString, dataRow))).rstrip() for dataRow in self.data[:3]]\n            xml += ['%s ...' % indent]\n            xml += [(' '.join(template) % tuple( map(toString, dataRow))).rstrip() for dataRow in self.data[-3:]]\n        else:\n            xml += [(' '.join(template) % tuple( map(toString, dataRow))).rstrip() for dataRow in self.data]\n        return xml\n\n    def toXML_strList(self, indent = '', **kwargs):\n\n        indent2 = indent + kwargs.get('incrementalIndent', '  ')\n        indent3 = indent2 + kwargs.get('incrementalIndent', '  ')\n        if len(self.data) < 10: kwargs['outline'] = False\n\n        XML_strList = ['%s<%s rows=\"%i\" columns=\"%i\">' % (indent,self.moniker,self.nRows,self.nColumns)]\n        XML_strList.append('%s<columnHeaders>' % (indent2))\n        for column in self.columns: XML_strList += column.toXML_strList(indent3)\n        XML_strList[-1] += '</columnHeaders>'\n\n        if not self.data:\n            XML_strList.append('%s<data/></%s>' % (indent2, self.moniker))\n            return XML_strList\n\n        XML_strList.append('%s<data>' % (indent2))\n        XML_strList += self.toStringList(indent=indent3, **kwargs)\n        XML_strList[-1] += '</data></%s>' % self.moniker\n\n        return XML_strList\n\n    @classmethod\n    def parseNodeUsingClass(cls, element, xPath, linkData, **kwargs):\n        \"\"\"\n        Read a table element from xml into python. 
To convert a column or attribute from string to some other type,\n        enter the new type in the conversionTable: {'index':int, 'scatteringRadius':PhysicalQuantityWithUncertainty, etc}.\n        \"\"\"\n\n        xPath.append(element.tag)\n        def fixAttributes(items):\n            attrs = dict(items)\n            for key in attrs:\n                if key == 'index': attrs[key] = int(attrs[key])\n                if key in conversionTable: attrs[key] = conversionTable[key]( attrs[key] )\n            return attrs\n\n        def floatOrBlank(val):\n            if val=='_': return Blank()\n            return float(val)\n\n        conversionTable = linkData.get('conversionTable',{})\n        nRows = int(element.get('rows'))\n        nColumns = int(element.get('columns'))\n        storageOrder = element.get('storageOrder', enumsModule.StorageOrder.rowMajor)\n        if storageOrder != enumsModule.StorageOrder.rowMajor:\n            storageOrder = enumsModule.StorageOrder.fromString(storageOrder)\n\n        _columns = element.find('columnHeaders')\n        columns = [ ColumnHeader(**fixAttributes(list(column.items()))) for column in _columns ]\n        data = element.find('data')\n\n        if data.text:\n            data = list(map(floatOrBlank, data.text.split()))\n        else:\n            data = []\n        for i in range(len(columns)):\n            if columns[i].name in conversionTable:\n                data[i::nColumns] = list(map(conversionTable[columns[i].name], data[i::nColumns]))\n        assert len(data) == nRows * nColumns\n        data = [data[i*nColumns:(i+1)*nColumns] for i in range(nRows)]\n\n        table = cls(columns, data, storageOrder=storageOrder)\n\n        xPath.pop()\n\n        return table\n\nclass ColumnHeader:\n    \"\"\" defines one column in a table \"\"\"\n\n    def __init__( self, index, name, unit ):\n        self.index = index\n        self.name = name\n        self.unit = unit\n\n    def __str__(self):\n        return '%s (%s)'%(self.name,self.unit)\n\n    def __eq__(self, other):\n        return self.name == other.name and self.unit == other.unit\n\n    def toXML_strList(self, indent='', **kwargs):\n\n        return [ '%s<column index=\"%s\" name=\"%s\" unit=\"%s\"/>' % ( indent, self.index, self.name, self.unit ) ]\n\nclass Blank:\n    \"\"\"Blank table entry, to indicate missing data.\"\"\"\n\n    def __init__( self ): pass\n    def __str__( self ): return '_'\n    def __add__( self, other ): return self\n    def __radd__( self, other ): return self\n    def __mul__( self, other ): return self\n    def __rmul__( self, other ): return self\n","sub_path":"xData/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":10614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"317791104","text":"import pandas as pd\nimport numpy as np\nimport gym\nimport random\n\nDATA_DIR = 'data/'\n\nclass EquityEnv(gym.Env):\n    metadata = {'render.modes': ['human']}\n    def __init__(self, \n                 principal=1000000, \n                 use_cost=False,\n                 split_data=False,\n                 asset_num=3, \n                 transaction_ratio=0.0002,\n                 episode_length=120):\n        \"\"\" \n        .t: int idx of df\n        .google/.amazon/.msft: [[Open, Close, High, Low, Volume]]\n        .holdings: {google_pos: int, amazon_pos: int, msft_pos: int} \n        ._setup(): helper function that cleans data and initializes dfs\n        .position: double = balance (double) + total of all pos. 
(double) \n        \"\"\" \n        \n        self.principal = principal\n        self.balance = principal\n        self.use_cost = use_cost\n        self.position = None\n        self.pnl = 0\n        self.transaction_ratio = transaction_ratio\n        self.asset_num = asset_num\n        self.episode_length = episode_length\n        \n        # State/Action Spaces\n        self.observation_space = gym.spaces.Box(low=0, high=3, shape=(5*asset_num+asset_num,), dtype='float32')\n        self.action_space = gym.spaces.Box(-2, 2, shape=(asset_num,), dtype='float32')\n\n        # Date idx\n        self.t = None \n        self.start_idx = None\n        self.end_idx = None\n        \n        # Dataframes\n        self.states = None\n        self.close_prices = None\n        self.open_prices = None\n        self.dates = None\n        \n        # Initializes dfs\n        self._setup()\n        \n        # Training Params\n        self.split_data = split_data\n        period = len(self.close_prices)\n        self.test_length = round(period/500)*100 #7200\n        self.train_period = np.arange(0, period-self.test_length) #0 to 28800\n        self.test_period = np.arange(period-self.test_length, period) # 28800 to 36000\n\n        print('-- Environment Created --')\n        \n    def reset(self):\n        \"\"\"\n        return: [position + google_t + amazon_t + msft_t]\n        \"\"\"\n        if self.split_data:\n            self.start_idx = max(0, random.choice(self.train_period) - self.episode_length)\n            self.end_idx = self.start_idx + self.episode_length\n        else:\n            self.start_idx = 0\n            self.end_idx = 36000\n        self.t = self.start_idx\n        self.balance = self.principal\n        self.position = np.array([0.0] * self.asset_num)\n        self.pnl = 0\n        return self._get_state(self.t)\n    \n    def step(self, action: list):\n        \"\"\"\n        action: [new_google_pos, new_amazon, new_msft_pos]\n        return: \n            next_state: [new_pos, google_t, amazon_t, msft_t]\n            reward: double, capital_gain - transaction_cost\n            done: bool\n            info: {\n                'date': dateobj, \n                'transaction_cost': double,\n                'capital_gain': double,\n                'previous_close': double,\n                'current_close': double\n            } \n        \"\"\"\n        self.t = self.t + 1\n\n        # Done\n        done = self.t >= self.end_idx\n        \n        # Reward \n        reward, info = self._get_reward(action)\n        \n        # Next State\n        next_state = self._get_state(self.t)\n        \n        return next_state, reward, done, info\n    \n    def render(self, mode='human', close=False):\n        return NotImplemented\n    \n    def _get_close_prices(self, t):\n        return np.array(self.close_prices.iloc[t].tolist())\n    \n    def _get_state(self, t):\n        return np.concatenate([self.position,\n                               self.states.iloc[t].tolist()])\n    \n    def _get_reward(self, action):\n        action = np.array(action)\n        \n        # Positions (dollar neutral): long/short pos same\n        old_position = self.position\n        new_position = action - action.mean()\n        \n        # Clipping weights\n        for i in range(self.asset_num):\n            if new_position[i] > 1:\n                new_position[i] = 1\n            elif new_position[i] < -1:\n                new_position[i] = -1\n        \n        # Close Prices\n        previous_close = self._get_close_prices(self.t-1)\n        current_close = self._get_close_prices(self.t)\n        \n        # Intermediate Reward Calculations\n        capital_gain = np.dot(new_position, (current_close - previous_close) / previous_close) * self.principal\n        transaction_cost = (np.absolute(new_position - old_position).sum() * self.transaction_ratio * self.principal) if self.use_cost else 0\n        \n        # Reward\n        reward = capital_gain - transaction_cost\n        self.pnl += reward\n        self.position = new_position\n        \n        # Debugging Info\n        info = {'date': self.dates.iloc[self.t],\n                'transaction_cost': transaction_cost,\n                'capital_gain': capital_gain,\n                'previous_close': previous_close,\n                'current_close': current_close}\n        \n        return reward, info\n    \n    def _setup(self):\n        states_df = pd.read_csv(DATA_DIR + \"/state.csv\")\n        prices_df = pd.read_csv(DATA_DIR + \"/price.csv\")\n        self.states = states_df[['open_gg', 
'close_gg', 'high_gg', 'low_gg', 'volume_gg',\n 'open_am', 'close_am', 'high_am', 'low_am', 'volume_am',\n 'open_ms', 'close_ms', 'high_ms', 'low_ms', 'volume_ms']]\n self.close_prices = prices_df[['close_gg', 'close_am', 'close_ms']]\n self.open_prices = prices_df[['open_gg', 'open_am', 'open_ms']]\n self.dates = states_df[['Dates']]\n print('-- Data Loaded --')\n","sub_path":"envs/EquityEnv_v2.py","file_name":"EquityEnv_v2.py","file_ext":"py","file_size_in_byte":5666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"616678820","text":"import jump\n\nREFERRAL_CODE = \"XXXX\"\nBINARY_LOCATION = r\"C:\\Program Files\\Mozilla Firefox\\firefox.exe\"\nGECKO_DRIVER_LOCATION = r\"C:\\Program Files\\Mozilla Firefox\\geckodriver.exe\"\n\nqueue_bot = jump.QueueJumpBot(BINARY_LOCATION, GECKO_DRIVER_LOCATION)\nqueue_bot.load_robinhood(REFERRAL_CODE)\n\nwhile True:\n queue_bot.commit_referral(queue_bot.generate_random_email())\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"79233243","text":"import graphene\r\nfrom graphene_django import DjangoObjectType\r\nfrom shops.models import Category, Product, Shop, ImportCountry\r\nfrom user.schema import UserType\r\nfrom django.db.models import Q, F, Subquery\r\n\r\n\r\nclass ShopType(DjangoObjectType):\r\n class Meta:\r\n model = Shop\r\n\r\n\r\nclass ProductType(DjangoObjectType):\r\n class Meta:\r\n model = Product\r\n\r\n\r\nclass ImportCountryType(DjangoObjectType):\r\n class Meta:\r\n model = ImportCountry\r\n\r\n\r\nclass CategoryType(DjangoObjectType):\r\n class Meta:\r\n model = Category\r\n\r\n\r\nclass Query(graphene.ObjectType):\r\n shops = graphene.List(ShopType)\r\n products = graphene.List(ProductType)\r\n countries = graphene.List(ImportCountryType)\r\n categories = graphene.List(CategoryType)\r\n category = graphene.Field(\r\n CategoryType,\r\n search=graphene.String(required=True)\r\n )\r\n product = graphene.Field(\r\n ProductType,\r\n title=graphene.String(required=True)\r\n )\r\n shop = graphene.Field(\r\n ShopType,\r\n id=graphene.Int(required=False),\r\n name=graphene.String(required=False),\r\n )\r\n country = graphene.Field(\r\n ImportCountryType,\r\n name=graphene.String(required=True),\r\n )\r\n\r\n def resolve_shops(self, info):\r\n return Shop.objects.all()\r\n\r\n def resolve_products(self, info):\r\n return Product.objects.all()\r\n\r\n def resolve_countries(self, info):\r\n return ImportCountry.objects.all()\r\n\r\n def resolve_categories(self, info):\r\n return Category.objects.all()\r\n\r\n def resolve_category(self, info, **kwargs):\r\n search = kwargs.get('search', None)\r\n if search:\r\n try:\r\n category = Category.objects.get(name__icontains=search)\r\n except Category.DoesNotExist:\r\n raise Exception(\r\n f\"The category {search!r} does not exist.\")\r\n else:\r\n return category\r\n else:\r\n raise Exception(\"You should enter text to search.\")\r\n\r\n def resolve_country(self, info, **kwargs):\r\n name = kwargs.get('name', '')\r\n if name:\r\n try:\r\n country = ImportCountry.objects.get(name__icontains=name)\r\n except ImportCountry.DoesNotExist:\r\n raise Exception(f\"Could not find any country named {name!r}\")\r\n else:\r\n return country\r\n else:\r\n raise Exception(\"You should enter a name.\")\r\n\r\n def resolve_product(self, info, **kwargs):\r\n title = kwargs.get('title', '')\r\n if title:\r\n title = 
title.lower()\r\n            try:\r\n                product = Product.objects.get(title__icontains=title)\r\n            except Product.DoesNotExist:\r\n                raise Exception(\r\n                    f\"Could not find any product with a title containing {title!r}\")\r\n            else:\r\n                return product\r\n        else:\r\n            raise Exception(\"You have to enter a product title.\")\r\n\r\n    def resolve_shop(self, info, **kwargs):\r\n        id, name = [kwargs.get(key, None) for key in ['id', 'name']]\r\n        if id or name:\r\n            if id:\r\n                try:\r\n                    shop = Shop.objects.get(id=id)\r\n                except Shop.DoesNotExist:\r\n                    raise Exception(f\"The shop with id={id} does not exist\")\r\n                else:\r\n                    return shop\r\n            else:\r\n                try:\r\n                    # get() (rather than filter()) returns a single Shop and actually\r\n                    # raises DoesNotExist, which the except clause below handles\r\n                    shop = Shop.objects.get(name__icontains=name)\r\n                except Shop.DoesNotExist:\r\n                    raise Exception(\r\n                        f\"The shop with a name containing {name!r} does not exist.\")\r\n                else:\r\n                    return shop\r\n        else:\r\n            raise Exception(\"You have to enter a shop name.\")\r\n\r\n\r\nclass CreateCountry(graphene.Mutation):\r\n    ok = graphene.Boolean(required=True)\r\n    country = graphene.Field(ImportCountryType)\r\n    errors = graphene.String(required=False)\r\n\r\n    class Arguments:\r\n        name = graphene.String(required=True)\r\n\r\n    def mutate(self, info, **kwargs):\r\n        ok, country, errors = False, None, None\r\n        name = kwargs.get('name', '')\r\n\r\n        if name:\r\n            name = name.strip()\r\n            filtered_names = ImportCountry.objects.filter(Q(name__iexact=name))\r\n            # bail out if a country with this name already exists\r\n            if filtered_names.count():\r\n                errors = f\"Country with name {name!r} already exists.\"\r\n            else:\r\n                country = ImportCountry.objects.create(name=name)\r\n                ok = True\r\n        else:\r\n            errors = \"You must provide a name.\"\r\n\r\n        return CreateCountry(\r\n            ok=ok,\r\n            country=country,\r\n            errors=errors,\r\n        )\r\n\r\n\r\nclass UpdateShop(graphene.Mutation):\r\n    ok = graphene.Boolean(required=True)\r\n    shop = graphene.Field(ShopType)\r\n    error = graphene.String(required=False)\r\n\r\n    class Arguments:\r\n        name = graphene.String(required=False)\r\n        email = graphene.String(required=False)\r\n        phone = graphene.String(required=False)\r\n        categories = graphene.NonNull(graphene.List(graphene.Int, required=True))\r\n        slogan = graphene.String(required=False)\r\n        # categories is a list of category ids (integers)\r\n\r\n    def mutate(self, info, **kwargs):\r\n        \"\"\"\r\n        Right after a user signs up, a shop with a few default properties is\r\n        automatically created for them, so here we only update that shop instead\r\n        of creating a new one (OneToOne model relationship).\r\n        \"\"\"\r\n        ok, shop, error = False, None, None\r\n        user = info.context.user\r\n        if user.is_anonymous:\r\n            error = \"You have to log in to update your shop.\"\r\n        else:\r\n            category_list_enter_by_user = kwargs.pop('categories')\r\n            # try to fetch a shop from db based on 'owner_id':\r\n            # if it exists, update it with the 'defaults' argument,\r\n            # otherwise create a new one using 'defaults';\r\n            # the result is a tuple: (object: Model, created: Boolean)\r\n            new_shop = Shop.objects.update_or_create(\r\n                owner_id=user.id,\r\n                defaults=kwargs,\r\n            )[0]\r\n            # save this shop before adding any category\r\n            if len(category_list_enter_by_user):\r\n                new_shop.categories.add(\r\n                    *Category.objects.filter(\r\n                        Q(id__in=category_list_enter_by_user)\r\n                    )\r\n                )\r\n                ok = True\r\n                shop = new_shop\r\n            else:\r\n                error = \"Please enter at least one category.\"\r\n\r\n        return UpdateShop(\r\n            ok=ok,\r\n            shop=shop,\r\n            error=error,\r\n        )\r\n\r\n\r\nclass CreateCategory(graphene.Mutation):\r\n    ok = graphene.Boolean(required=True)\r\n    
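# NOTE (editorial comment, not in the original source): like the mutations\r\n    # above, this one reports success via an ok flag plus an optional error string.\r\n    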
category = graphene.Field(CategoryType)\r\n    error = graphene.String(required=False)\r\n\r\n    class Arguments:\r\n        name = graphene.String(required=True)\r\n\r\n    def mutate(self, info, **kwargs):\r\n        ok, category, error = False, None, None\r\n        user = info.context.user\r\n        if user.is_anonymous:\r\n            error = \"You must log in to add a new category.\"\r\n        else:\r\n            name = kwargs.get('name', None)\r\n            if name:\r\n                name = name.strip().lower()\r\n                category = Category.objects.get_or_create(name=name)\r\n                # get_or_create() returns a tuple (instance, created), so an\r\n                # existing category is reused instead of duplicated\r\n                ok = True\r\n                category = category[0]\r\n            else:\r\n                error = \"You must enter a category name.\"\r\n\r\n        return CreateCategory(\r\n            ok=ok,\r\n            category=category,\r\n            error=error,\r\n        )\r\n\r\n\r\nclass CreateProduct(graphene.Mutation):\r\n    ok = graphene.Boolean(required=True)\r\n    product = graphene.Field(ProductType)\r\n    error = graphene.String(required=False)\r\n\r\n    class Arguments:\r\n        title = graphene.String(required=True)\r\n        description = graphene.String(required=True)\r\n        price = graphene.Float(required=True)\r\n        on_sale = graphene.Float(required=False, default_value=0.0)\r\n        total_products = graphene.Int(required=True)\r\n        categories = graphene.NonNull(\r\n            graphene.List(graphene.Int, required=True)\r\n        )\r\n        source = graphene.List(\r\n            graphene.Int,\r\n            required=False,\r\n            default_value=[],\r\n            description='The countries this product was imported from.'\r\n        )\r\n        images = graphene.NonNull(\r\n            graphene.List(graphene.String, required=True),\r\n        )\r\n\r\n    def mutate(self, info, **kwargs):\r\n        ok, product, error = False, None, None\r\n\r\n        user = info.context.user\r\n        if user.is_anonymous:\r\n            error = \"You have to log in to add a new product.\"\r\n        else:\r\n            source_list, image_list, category_list = [kwargs.pop(key, []) for key in ['source', 'images', 'categories']]\r\n            new_product = user.shop.products.create(**kwargs)\r\n            if len(source_list):\r\n                source_list = ImportCountry.objects.filter(Q(id__in=source_list))\r\n                if source_list.count():\r\n                    new_product.source.add(*source_list)\r\n\r\n            if len(category_list):\r\n                category_list = Category.objects.filter(Q(id__in=category_list))\r\n                if category_list.count():\r\n                    new_product.categories.add(*category_list)\r\n            else:\r\n                raise Exception(\r\n                    \"You have to enter at least one category.\"\r\n                )\r\n            if len(image_list):\r\n                new_product.images.add(*image_list)\r\n\r\n            ok, product = True, new_product\r\n\r\n        # ok/product/error are always bound here, so the anonymous-user path no\r\n        # longer raises a NameError and the required ok field is always returned\r\n        return CreateProduct(\r\n            ok=ok,\r\n            product=product,\r\n            error=error,\r\n        )\r\n\r\n\r\n# class UpdateProduct(graphene.Mutation):\r\n#     product = graphene.Field(ProductType)\r\n\r\n#     class Arguments:\r\n#         title = graphene.String(required=False)\r\n#         description = graphene.String(required=False)\r\n#         price = graphene.Float(required=False)\r\n#         on_sale = graphene.Float(required=False)\r\n#         total_products = graphene.Int(required=False)\r\n#         source = graphene.List(graphene.NonNull(graphene.Int), required=False)\r\n#         categories = graphene.List(graphene.NonNull(graphene.Int), required=False)\r\n\r\n#     def mutate(self, info, **kwargs):\r\n#         user = info.user\r\n#         if user.is_anonymous:\r\n#             raise Exception(\"You must log in to update product\")\r\n\r\n#         category_list = kwargs.pop('categories')\r\n#         product = Product.objects.update(**kwargs)\r\n#         if len(category_list):\r\n#             product.categories.add(*category_list)\r\n\r\n#         return UpdateProduct(product=product)\r\n\r\n\r\nclass Mutation(graphene.ObjectType):\r\n    create_country = CreateCountry.Field()\r\n    create_category = 
CreateCategory.Field()\r\n    update_shop = UpdateShop.Field()\r\n    create_product = CreateProduct.Field()\r\n","sub_path":"shops/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":11380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"282783294","text":"try:\n    from PyQt5.QtWidgets import *\n    from PyQt5.uic import loadUi\nexcept ImportError:\n    # a failed import raises ImportError, not NameError\n    print (\"Error: Could not find PyQt modules.\")\n\nfrom inspect import getmembers\nimport sys\n\n\nclass UIWindowLoader(QMainWindow):\n\n    text_field = {}\n    checkbox = {}\n    slider = {}\n    function = {}\n\n    def __init__(self, ui_file=\"file.ui\", debug=False, function_class=None):\n        super(UIWindowLoader, self).__init__()\n        try:\n            loadUi(ui_file, self)\n        except FileNotFoundError:\n            print (\"Error: Could not find ui file.\")\n            return\n        self.process_widgets(debug, function_class)\n        self.show()\n\n    def process_widgets(self, debug, function_class):\n\n        for widget in QApplication.allWidgets():\n            if isinstance(widget, QPushButton):\n                self.connect_push_buttons(widget, function_class)\n            if isinstance(widget, QLineEdit):\n                self.text_field[widget.objectName()] = widget.text()\n                self.connect_text_fields(widget)\n            if isinstance(widget, QCheckBox):\n                if widget.isChecked():\n                    self.checkbox[widget.objectName()] = True\n                else:\n                    self.checkbox[widget.objectName()] = False\n                self.connect_checkboxes(widget)\n            if isinstance(widget, QSlider):\n                self.slider[widget.objectName()] = widget.value()\n                self.connect_sliders(widget)\n\n    def create_function_list(self, widget, function_class):\n        try:\n            self.function[widget.objectName()] = getattr(\n                function_class, widget.objectName())\n        except AttributeError:\n            print (\"Error: The target class does not have a function called: \",\n                   widget.objectName())\n\n    def connect_push_buttons(self, widget, function_class):\n        if function_class is not None:\n            try:\n                widget.clicked.connect(getattr(function_class,\n                                               widget.objectName()))\n            except AttributeError:\n                print (\"Error: Target class has no function called: \" +\n                       widget.objectName())\n        else:\n            try:\n                widget.clicked.connect(getattr(sys.modules[__name__],\n                                               widget.objectName()))\n            except AttributeError:\n                print (\"Error: Assign a function to the push button: \" +\n                       widget.objectName())\n\n    def connect_text_fields(self, widget):\n        widget.textChanged.connect(lambda: self.save_text_lines(widget))\n\n    def connect_checkboxes(self, widget):\n        widget.stateChanged.connect(lambda: self.get_checkbox_value(widget))\n\n    def connect_sliders(self, widget):\n        widget.valueChanged.connect(lambda: self.get_slider_value(widget,\n                                                                  self.slider))\n\n    def get_slider_value(self, widget, slider_dict):\n        slider_dict[widget.objectName()] = widget.value()\n\n    def get_checkbox_value(self, widget):\n        if widget.isChecked():\n            self.checkbox[widget.objectName()] = True\n        else:\n            self.checkbox[widget.objectName()] = False\n\n    def save_text_lines(self, widget):\n        self.text_field[widget.objectName()] = widget.text()\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    window = UIWindowLoader(ui_file=\"file.ui\")\n    app.processEvents()\n    sys.exit(app.exec_())\n","sub_path":"QTLoader.py","file_name":"QTLoader.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"600084072","text":"print(\"loading packages...\")\nimport pandas as pd\n\n# Opens the file with data from DCL experiment and parses it into data\nprint(\"Reading ParaK...\")\ndata_archivo = 
'paraK.csv'\ndata_paraK = pd.read_csv(data_archivo, index_col=False)\nprint(\"ParaK read!\")\nprint(data_paraK[:3])\n\ndata_paraK = pd.DataFrame(data_paraK.groupby('Stage').get_group(2))\nprint(data_paraK[:3])\n\ndictPerro = {}\ndictGuess = {}\n\nfor jugador, grp_j in data_paraK.groupby('Player'):\n for ronda, grp_r in grp_j.groupby('Round'):\n perros = list(grp_r.Object)\n guesses = list(grp_r.Label)\n for i in range(1, 6):\n perro = 'Perro' + str(i)\n dictPerro[(jugador, ronda, perro)] = perros[i - 1]\n dictGuess[(jugador, ronda, perro)] = guesses[i - 1]\n\ndef aplica_perros(x, y, z):\n return dictPerro[(x,y,z)][0]\n\ndef aplica_guesses(x, y, z):\n return dictGuess[(x,y,z)]\n\nprint(\"Reading comunicacion...\")\ndata_archivo = 'comunicacion.csv'\ndata_comunicacion = pd.read_csv(data_archivo, index_col=False)\nprint(\"comunicacion read!\")\nprint(data_comunicacion[:3])\n\ndata_comunicacion['Dog'] = data_comunicacion[['Player', 'Round', 'Perro']].apply(lambda x: aplica_perros(*x), axis=1)\ndata_comunicacion['Guess'] = data_comunicacion[['Player', 'Round', 'Perro']].apply(lambda x: aplica_guesses(*x), axis=1)\nprint(data_comunicacion[:3])\n\ndata_comunicacion.to_csv(data_archivo, index=False)\nprint(\"Data saved to \", data_archivo)\n","sub_path":"12-16-18-23-27-sept-2019/Group/crea_dicts_comunicacion.py","file_name":"crea_dicts_comunicacion.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"613506451","text":"# -*- coding: utf-8 -*-\n# __Author__: Sdite\n# __Email__ : vonsdite@gmail.com\n\nimport os\nimport time\nimport pickle\nimport queue\nfrom PIL import Image, ImageDraw\nfrom math import sqrt\n\n# 测试手机红米4a 分辨率720*1280\n\n# 保存截图的文件夹\nSCREEN_SHOT_PATH = 'screenshot/'\nif not os.path.isdir(SCREEN_SHOT_PATH):\n os.mkdir(SCREEN_SHOT_PATH)\n\n# 小人最下方的线到小人底盘中心的偏移值\nDEVIATION = 13\n\n# 分数板的最低位置\nSCORE_MAX_UNDERLINE = 192\n\n# 每次跳跃的角度的tan值\nTAN = 0.58278145695364238410596026490066\n# tan = (742-566)/(526-224)\n\n# 小人的宽度\nLITTLE_MAN_WIDTH = 52\n\n# 按压的位置,为开始游戏的位置\nPRESS_X1, PRESS_Y1, PRESS_X2, PRESS_Y2 = (300, 1010, 400, 1010)\n\nPRESS_X_OFFSET = 2.1588599 # 按压时间系数, 需要更改,可以自己慢慢微调\n\n# 调用安卓adb截图并获取截图\ndef pull_screenshot(mission):\n # 截图保存在sd卡中的命名\n img_path = '/sdcard/{}.png'.format(mission)\n\n # adb截图\n os.system('adb shell screencap -p {}'.format(img_path))\n\n # adb拉取截图到'screenshot'目录\n os.system('adb pull {} {}'.format(img_path, SCREEN_SHOT_PATH))\n\n\n# 寻找小人的底盘中心点\ndef find_little_man_center(im):\n width, height = im.size\n\n center_x_sum = 0\n center_x_count = 0\n center_y_max = 0\n center_x = 0\n center_y = 0\n\n # 通过观察,小人最下方底盘是一条直线\n # 由此来找出小人底盘的中心\n # 从分数板下方高度开始遍历,减掉部分循环,提高效率\n for y in range(SCORE_MAX_UNDERLINE, height):\n for x in range(width):\n pixel = im.getpixel((x, y))\n # 根据底边像素值得到的区间\n if (50 < pixel[0] < 59) and (55 < pixel[1] < 63) and (94 < pixel[2] < 102):\n center_x_sum += x\n center_x_count += 1\n center_y_max = y\n\n if center_x_count == 0 or center_x_sum == 0:\n return 0, 0\n\n # 求出小人的中心\n center_x = center_x_sum // center_x_count\n center_y = center_y_max - DEVIATION\n\n # 将小人中心标记出来\n im.putpixel((center_x, center_y), (255, 0, 0))\n\n return center_x, center_y\n\n\n# 寻找要跳到的中心位置\ndef find_target(im, center_x, center_y):\n width, height = im.size\n\n # 要跳到的中心点在 小人底盘上方和分数板下方\n # 先找出要跳到的中心点的x坐标\n target_x_sum = 0\n target_x_count = 0\n target_x = 0\n target_y = 0\n\n for y in range(SCORE_MAX_UNDERLINE, center_y):\n tmp = im.getpixel((0, y)) # 作为参考值\n for x in range(width):\n 
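# (editorial note) On the first row below the scoreboard that contains pixels\n            # differing from the left-edge reference colour by more than 10 in summed\n            # RGB, the mean x of those pixels is taken as the target platform centre.\n            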
# 距离小人底盘中心小于小人宽度的跳过\n if abs(x - center_x) < LITTLE_MAN_WIDTH:\n continue\n\n pixel = im.getpixel((x, y))\n if abs(pixel[0] - tmp[0]) + abs(pixel[1] - tmp[1]) + abs(pixel[2] - tmp[2]) > 10:\n target_x_sum += x\n target_x_count += 1\n\n if target_x_sum != 0 and target_x_count != 0:\n target_x = target_x_sum // target_x_count\n break\n\n if target_x_sum == 0 and target_x_count == 0:\n return 0, 0\n\n target_y = int(center_y - TAN * abs(target_x - center_x))\n\n # 将要跳到的目标点标记出来\n im.putpixel((target_x, target_y), (255, 0, 0))\n\n return target_x, target_y\n\n\n# 绘制小人跳的路径\ndef draw_line(im, center_x, center_y, target_x, target_y, mission):\n draw = ImageDraw.Draw(im)\n draw.line((center_x, center_y) + (target_x, target_y), fill=2, width=3)\n del draw\n im.save('{}{}.png'.format(SCREEN_SHOT_PATH, mission))\n\n\n# 跳!\ndef jump(distance):\n if distance == 0:\n # 为0表示游戏没开始,就点击一下开始\n cmd = 'adb shell input tap {} {}'.format(PRESS_X1, PRESS_Y1)\n else:\n press_time = distance * PRESS_X_OFFSET\n press_time = max(press_time, 200) # 设置 200 ms 是最小的按压时间\n press_time = int(press_time)\n # print(press_time)\n cmd = 'adb shell input swipe {} {} {} {} {}'.format(PRESS_X1, PRESS_Y1, PRESS_X2,\n PRESS_Y2, press_time)\n print('press: {}ms distance: {}px'.format(press_time, distance))\n os.system(cmd)\n\n\ndef run():\n mission = 1\n while True:\n pull_screenshot(mission) # 截图并拉取图片\n\n im = Image.open('{}{}.png'.format(SCREEN_SHOT_PATH, mission)) # 打开图片\n # 因为pycharm PIL库不自动补全,加这句就可以了\n assert isinstance(im, Image.Image)\n\n center_x, center_y = find_little_man_center(im) # 获取小人底盘中心\n target_x, target_y = find_target(im, center_x, center_y) # 获取要跳到的目标位置\n jump(sqrt((center_x - target_x) ** 2 + (center_y - target_y) ** 2)) # 跳\n\n draw_line(im, center_x, center_y, target_x, target_y, mission) # 绘制要跳跃的路径\n\n mission += 1\n\n time.sleep(1)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"pyJumpAndJump.py","file_name":"pyJumpAndJump.py","file_ext":"py","file_size_in_byte":5068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"206088694","text":"import re\nimport time\nimport uuid\nimport argparse\nimport csv\nimport json\nimport os\nimport random\nimport sys\nfrom tqdm import tqdm\n\n# package local imports\nsys.path.append(os.getcwd() + \"/..\")\n\nfrom common_datagen import (\n download_url,\n generate_setup_json,\n compress_files,\n generate_inputs_dict_item,\n humanized_bytes,\n del_non_use_case_specific_keys,\n add_key_metric,\n upload_dataset_artifacts_s3,\n add_deployment_requirements_redis_server_module,\n add_deployment_requirements_benchmark_tool,\n add_deployment_requirements_utilities,\n init_deployment_requirement,\n remove_file_if_exists,\n)\n\n\ndef process_inventory(\n row,\n market_count,\n nodes,\n total_nodes,\n docs_map,\n product_ids,\n countries_alpha_3,\n countries_alpha_p,\n):\n # uniq_id,product_name,manufacturer,price,number_available_in_stock,number_of_reviews,number_of_answered_questions,average_review_rating,amazon_category_and_sub_category,customers_who_bought_this_item_also_bought,description,product_information,product_description,items_customers_buy_after_viewing_this_item,customer_questions_and_answers,customer_reviews,sellers\n added_docs = 0\n NUMERIC = \"NUMERIC\"\n GEO = \"GEO\"\n TAG = \"TAG\"\n TEXT = \"TEXT\"\n for inner_doc_pos in range(0, market_count):\n skuId = row[0]\n brand = row[2]\n price = random.random() * 1500.0\n number_of_reviews = random.randint(0, 64000)\n average_review_rating = random.random() * 5.0\n 
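# (editorial note) only skuId, brand and the seller list come from the CSV\n        # row; the price, review count and rating above are synthesised at random\n        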
sellers_raw = row[16]\n nodeType = \"store\"\n availableToSource = \"true\"\n standardAvailableToPromise = \"true\"\n bopisAvailableToPromise = \"true\"\n onHold = \"false\"\n exclusionType = \"false\"\n\n onhand = random.randint(0, 64000)\n allocated = random.randint(0, 64000)\n reserved = random.randint(0, 64000)\n storeAllocated = random.randint(0, 64000)\n transferAllocated = random.randint(0, 64000)\n storeReserved = random.randint(0, 64000)\n confirmedQuantity = random.randint(0, 64000)\n standardSafetyStock = random.randint(0, 64000)\n bopisSafetyStock = random.randint(0, 64000)\n virtualHold = random.randint(0, 64000)\n\n onhandLastUpdatedTimestamp = int(time.time() + random.randint(0, 24 * 60 * 60))\n allocatedLastUpdatedTimestamp = int(\n time.time() + random.randint(0, 24 * 60 * 60)\n )\n reservedLastUpdatedTimestamp = int(\n time.time() + random.randint(0, 24 * 60 * 60)\n )\n storeAllocatedLastUpdatedTimestamp = int(\n time.time() + random.randint(0, 24 * 60 * 60)\n )\n transferAllocatedLastUpdatedTimestamp = int(\n time.time() + random.randint(0, 24 * 60 * 60)\n )\n storeReservedLastUpdatedTimestamp = int(\n time.time() + random.randint(0, 24 * 60 * 60)\n )\n\n pattern = re.compile(\"[\\W_]+\")\n\n sellers = re.findall(r'\\\"Seller_name_\\d+\\\"=>\\\"([^\"]+)\\\"', sellers_raw)\n if len(sellers) == 0:\n available = \"false\"\n\n for node in sellers:\n if node not in nodes:\n total_nodes = total_nodes + 1\n nodeId = total_nodes\n nodes[node] = nodeId\n\n nodesList = list(nodes.keys())\n if len(nodesList) > 0:\n # k = 5 if 5 <= len(nodesList) else len(nodesList)\n k = 10\n for node in random.choices(nodesList, k=k):\n # print(random.choices(nodesList, k=k))\n nodeId = nodes[node]\n did = str(uuid.uuid4()).replace(\"-\", \"\")\n if skuId not in product_ids:\n product_ids[skuId] = 1\n else:\n product_ids[skuId] += 1\n market = random.choices(countries_alpha_3, weights=countries_alpha_p)[0]\n doc_id = \"{market}_{nodeId}_{skuId}\".format(\n market=market, nodeId=nodeId, skuId=did\n )\n\n if doc_id not in docs_map:\n doc = {\n \"doc_id\": doc_id,\n \"schema\": {\n \"price\": {\n \"type\": NUMERIC,\n \"value\": price,\n \"field_options\": [\"SORTABLE\"],\n },\n \"number_of_reviews\": {\n \"type\": NUMERIC,\n \"value\": number_of_reviews,\n \"field_options\": [\"SORTABLE\"],\n },\n \"average_review_rating\": {\n \"type\": NUMERIC,\n \"value\": average_review_rating,\n \"field_options\": [\"SORTABLE\"],\n },\n \"market\": {\n \"type\": TAG,\n \"value\": market,\n \"field_options\": [\"SORTABLE\"],\n },\n \"nodeId\": {\n \"type\": TAG,\n \"value\": nodeId,\n \"field_options\": [\"SORTABLE\"],\n },\n \"skuId\": {\n \"type\": TAG,\n \"value\": skuId,\n \"field_options\": [\"SORTABLE\"],\n },\n # onhand\n \"onhand\": {\n \"type\": NUMERIC,\n \"value\": onhand,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n \"onhandLastUpdatedTimestamp\": {\n \"type\": NUMERIC,\n \"value\": onhandLastUpdatedTimestamp,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n # allocated\n \"allocated\": {\n \"type\": NUMERIC,\n \"value\": allocated,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n \"allocatedLastUpdatedTimestamp\": {\n \"type\": NUMERIC,\n \"value\": allocatedLastUpdatedTimestamp,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n # reserved\n \"reserved\": {\n \"type\": NUMERIC,\n \"value\": reserved,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n \"reservedLastUpdatedTimestamp\": {\n \"type\": NUMERIC,\n \"value\": reservedLastUpdatedTimestamp,\n 
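# (editorial note) SORTABLE keeps a copy of the value for SORTBY queries;\n                            # NOINDEX stores the field without indexing it for search\n                            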
\"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n # store allocated\n \"storeAllocated\": {\n \"type\": NUMERIC,\n \"value\": storeAllocated,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n \"storeAllocatedLastUpdatedTimestamp\": {\n \"type\": NUMERIC,\n \"value\": storeAllocatedLastUpdatedTimestamp,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n # transfer allocated\n \"transferAllocated\": {\n \"type\": NUMERIC,\n \"value\": transferAllocated,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n \"transferAllocatedLastUpdatedTimestamp\": {\n \"type\": NUMERIC,\n \"value\": transferAllocatedLastUpdatedTimestamp,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n # transfer allocated\n \"storeReserved\": {\n \"type\": NUMERIC,\n \"value\": storeReserved,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n \"storeReservedLastUpdatedTimestamp\": {\n \"type\": NUMERIC,\n \"value\": storeReservedLastUpdatedTimestamp,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n # store reserved\n \"confirmedQuantity\": {\n \"type\": NUMERIC,\n \"value\": confirmedQuantity,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n \"standardSafetyStock\": {\n \"type\": NUMERIC,\n \"value\": standardSafetyStock,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n \"bopisSafetyStock\": {\n \"type\": NUMERIC,\n \"value\": bopisSafetyStock,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n \"virtualHold\": {\n \"type\": NUMERIC,\n \"value\": virtualHold,\n \"field_options\": [\"SORTABLE\", \"NOINDEX\"],\n },\n # tags\n \"availableToSource\": {\n \"type\": TAG,\n \"value\": pattern.sub(\"\", availableToSource),\n \"field_options\": [],\n },\n \"standardAvailableToPromise\": {\n \"type\": TAG,\n \"value\": pattern.sub(\"\", standardAvailableToPromise),\n \"field_options\": [],\n },\n \"bopisAvailableToPromise\": {\n \"type\": TAG,\n \"value\": pattern.sub(\"\", bopisAvailableToPromise),\n \"field_options\": [],\n },\n \"nodeType\": {\n \"type\": TAG,\n \"value\": pattern.sub(\"\", nodeType),\n \"field_options\": [],\n },\n \"brand\": {\n \"type\": TAG,\n \"value\": pattern.sub(\"\", brand),\n \"field_options\": [\"NOINDEX\"],\n },\n \"onHold\": {\n \"type\": TAG,\n \"value\": pattern.sub(\"\", onHold),\n \"field_options\": [],\n },\n \"exclusionType\": {\n \"type\": TAG,\n \"value\": pattern.sub(\"\", exclusionType),\n \"field_options\": [],\n },\n },\n }\n docs_map[doc_id] = doc\n dd = {k: v[\"value\"] for k, v in doc[\"schema\"].items()}\n\n # print(\"{\")\n # for k, v in dd.items():\n # print(\" \\\"{}\\\" : \\\"{}\\\",\".format(k, v))\n # print(\"}\")\n added_docs = added_docs + 1\n\n return nodes, total_nodes, docs_map, added_docs, product_ids\n\n\ndef generate_ft_aggregate_row(\n index, countries_alpha_3, countries_alpha_p, maxSkusList, skus, maxNodesList, nodes\n):\n # number of products by region\n qr = random.random()\n if qr < 0.5:\n cmd = [\n \"READ\",\n \"AGGREGATE1-GROUPBY\",\n 1,\n \"FT.AGGREGATE\",\n \"{index}\".format(index=index),\n \"*\",\n \"GROUPBY\",\n 1,\n \"@market\",\n \"REDUCE\",\n \"COUNT\",\n 0,\n \"AS\",\n \"nb_of_products\",\n ]\n else:\n # number of products by region with rating above a threshold\n min_ration = random.random() * 5.0\n cmd = [\n \"READ\",\n \"AGGREGATE2-QUERY-FILTERED-GROUPBY\",\n 1,\n \"FT.AGGREGATE\",\n \"{index}\".format(index=index),\n \"@average_review_rating:[{},5.0]\".format(min_ration),\n \"GROUPBY\",\n 1,\n \"@market\",\n \"REDUCE\",\n \"COUNT\",\n 0,\n \"AS\",\n \"nb_of_products\",\n ]\n\n return 
cmd\n\n\ndef generate_ft_add_row(index, doc):\n cmd = [\n \"SETUP_WRITE\",\n \"S1\",\n 1,\n \"HSET\",\n \"{index}-{doc_id}\".format(index=index, doc_id=doc[\"doc_id\"]),\n ]\n for f, v in doc[\"schema\"].items():\n cmd.append(f)\n cmd.append(v[\"value\"])\n return cmd\n\n\ndef generate_ft_create_row(index, doc):\n cmd = [\"FT.CREATE\", \"{index}\".format(index=index), \"ON\", \"HASH\", \"SCHEMA\"]\n for f, v in doc[\"schema\"].items():\n cmd.append(f)\n cmd.append(v[\"type\"])\n cmd.append(\"SORTABLE\")\n return cmd\n\n\ndef generate_ft_drop_row(index):\n cmd = [\"FT.DROP\", \"{index}\".format(index=index)]\n return cmd\n\n\ndef generate_ft_add_update_row(index, doc):\n cmd = [\n \"UPDATE\",\n \"U1\",\n 1,\n \"HSET\",\n \"{index}-{doc_id}\".format(index=index, doc_id=doc[\"doc_id\"]),\n ]\n standardAvailableToPromise = (\n \"true\" if bool(random.getrandbits(1)) == True else \"false\"\n )\n availableToSource = \"true\" if bool(random.getrandbits(1)) == True else \"false\"\n market = doc[\"schema\"][\"market\"][\"value\"]\n nodeId = doc[\"schema\"][\"nodeId\"][\"value\"]\n nodeType = doc[\"schema\"][\"nodeType\"][\"value\"]\n new = [\n \"market\",\n market,\n \"nodeId\",\n nodeId,\n \"nodeType\",\n nodeType,\n \"availableToSource\",\n availableToSource,\n \"standardAvailableToPromise\",\n standardAvailableToPromise,\n ]\n cmd.extend(new)\n return cmd\n\n\ndef generate_setup_commands():\n global progress, csvfile, nodes, total_nodes, docs_map, skusIds, total_docs\n docs = []\n print(\"-- generating the write commands -- \")\n print(\"Reading csv data to generate docs\")\n progress = tqdm(unit=\"docs\", total=doc_limit)\n while total_docs < doc_limit:\n with open(input_data_filename, newline=\"\") as csvfile:\n spamreader = csv.reader(csvfile, delimiter=\",\")\n for row in spamreader:\n nodes, total_nodes, docs_map, added_docs, skusIds = process_inventory(\n row,\n 5,\n nodes,\n total_nodes,\n docs_map,\n skusIds,\n countries_alpha_3,\n countries_alpha_p,\n )\n total_docs = total_docs + added_docs\n if total_docs > doc_limit:\n break\n progress.update(added_docs)\n if total_docs > doc_limit:\n break\n progress.close()\n total_skids = len(list(skusIds.keys()))\n print(\n \"Generated {} total docs with {} distinct skids and {} distinct nodes\".format(\n total_docs, total_skids, total_nodes\n )\n )\n\n\ndef save_setup_csv_command_list():\n global all_csvfile, all_csv_writer, progress, doc, generated_row\n all_csvfile = open(all_fname, \"w\", newline=\"\")\n setup_csvfile = open(setup_fname, \"w\", newline=\"\")\n all_csv_writer = csv.writer(all_csvfile, delimiter=\",\")\n setup_csv_writer = csv.writer(setup_csvfile, delimiter=\",\")\n progress = tqdm(unit=\"docs\", total=total_docs)\n for doc in docs_map.values():\n generated_row = generate_ft_add_row(indexname, doc)\n all_csv_writer.writerow(generated_row)\n setup_csv_writer.writerow(generated_row)\n progress.update()\n progress.close()\n all_csvfile.close()\n setup_csvfile.close()\n\n\ndef generate_benchmark_commands():\n global all_csvfile, progress, doc, generated_row, total_updates, total_reads\n print(\"-- generating {} update/read commands -- \".format(total_benchmark_commands))\n print(\"\\t saving to {} and {}\".format(bench_fname, all_fname))\n all_csvfile = open(all_fname, \"a\", newline=\"\")\n bench_csvfile = open(bench_fname, \"w\", newline=\"\")\n all_csv_writer = csv.writer(all_csvfile, delimiter=\",\")\n bench_csv_writer = csv.writer(bench_csvfile, delimiter=\",\")\n docs_list = list(docs_map.values())\n skusIds_list = 
list(skusIds.keys())\n nodesIds = [\"{}\".format(x) for x in range(1, total_nodes)]\n progress = tqdm(unit=\"docs\", total=total_benchmark_commands)\n for _ in range(0, total_benchmark_commands):\n choice = random.choices([\"update\", \"read\"], weights=[update_ratio, read_ratio])[\n 0\n ]\n if choice == \"update\":\n random_doc_pos = random.randint(0, total_docs - 1)\n doc = docs_list[random_doc_pos]\n generated_row = generate_ft_add_update_row(indexname, doc)\n total_updates = total_updates + 1\n elif choice == \"read\":\n generated_row = generate_ft_aggregate_row(\n indexname, countries_alpha_3, countries_alpha_p, 0, 0, 0, 0\n )\n total_reads = total_reads + 1\n all_csv_writer.writerow(generated_row)\n bench_csv_writer.writerow(generated_row)\n progress.update()\n progress.close()\n bench_csvfile.close()\n all_csvfile.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"RediSearch FTSB data generator.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"--project\", type=str, default=\"redisearch\", help=\"the project being tested\"\n )\n parser.add_argument(\n \"--update-ratio\",\n type=float,\n default=0.0,\n help=\"the total ratio of updates ( HSET ). The Aggregate ratio will be given by (1 - update-ratio)\",\n )\n parser.add_argument(\n \"--seed\",\n type=int,\n default=12345,\n help=\"the random seed used to generate random deterministic outputs\",\n )\n parser.add_argument(\n \"--doc-limit\",\n type=int,\n default=100000,\n help=\"the total documents to generate to be added in the setup stage\",\n )\n parser.add_argument(\n \"--total-benchmark-commands\",\n type=int,\n default=1000000,\n help=\"the total commands to generate to be issued in the benchmark stage\",\n )\n parser.add_argument(\n \"--index-name\",\n type=str,\n default=\"ecommerce\",\n help=\"the name of the RediSearch index to be used\",\n )\n parser.add_argument(\n \"--test-name\",\n type=str,\n default=\"10M-ecommerce-aggregate\",\n help=\"the name of the test\",\n )\n parser.add_argument(\n \"--test-description\",\n type=str,\n default=\"benchmark focused on aggregate performance\",\n help=\"the full description of the test\",\n )\n parser.add_argument(\n \"--countries-alpha3\",\n type=str,\n default=\"US,CA,FR,IL,UK,ES,PT,BR,AU\",\n help=\"comma separated full list of countries alpha3 codes used to populate the @market field. Needs to have the same number of elements as --countries-alpha3-probability\",\n )\n parser.add_argument(\n \"--countries-alpha3-probability\",\n type=str,\n default=\"0.6,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.05\",\n help=\"comma separated probability of the list of countries passed via --countries-alpha3. Needs to have the same number of elements as --countries-alpha3\",\n )\n parser.add_argument(\n \"--upload-artifacts-s3\",\n default=False,\n action=\"store_true\",\n help=\"uploads the generated dataset files and configuration file to public benchmarks.redislabs bucket. 
Proper credentials are required\",\n )\n parser.add_argument(\n \"--input-data-filename\",\n type=str,\n default=\"./../../../scripts/usecases/ecommerce/amazon_co-ecommerce_sample.csv\",\n help=\"path of the input file containing the origin CSV dataset to read the data from.\",\n )\n\n args = parser.parse_args()\n use_case_specific_arguments = del_non_use_case_specific_keys(dict(args.__dict__))\n\n # generate the temporary working dir if required\n seed = args.seed\n project = args.project\n doc_limit = args.doc_limit\n indexname = args.index_name\n test_name = args.test_name\n description = args.test_description\n s3_bucket_name = \"benchmarks.redislabs\"\n s3_bucket_path = \"redisearch/datasets/{}/\".format(test_name)\n s3_uri = \"https://s3.amazonaws.com/{bucket_name}/{bucket_path}\".format(\n bucket_name=s3_bucket_name, bucket_path=s3_bucket_path\n )\n\n benchmark_output_file = \"{test_name}.{project}.commands\".format(\n test_name=test_name, project=project\n )\n benchmark_config_file = \"{test_name}.{project}.cfg.json\".format(\n test_name=test_name, project=project\n )\n s3_uri = \"https://s3.amazonaws.com/{bucket_name}/{bucket_path}\".format(\n bucket_name=s3_bucket_name, bucket_path=s3_bucket_path\n )\n all_fname = \"{}.ALL.csv\".format(benchmark_output_file)\n setup_fname = \"{}.SETUP.csv\".format(benchmark_output_file)\n bench_fname = \"{}.BENCH.csv\".format(benchmark_output_file)\n all_fname_compressed = \"{}.ALL.tar.gz\".format(benchmark_output_file)\n setup_fname_compressed = \"{}.SETUP.tar.gz\".format(benchmark_output_file)\n bench_fname_compressed = \"{}.BENCH.tar.gz\".format(benchmark_output_file)\n remote_url_all = \"{}{}\".format(s3_uri, all_fname_compressed)\n remote_url_setup = \"{}{}\".format(s3_uri, setup_fname_compressed)\n remote_url_bench = \"{}{}\".format(s3_uri, bench_fname_compressed)\n json_version = \"0.1\"\n benchmark_repetitions_require_teardown_and_resetup = False\n\n ## remove previous files if they exist\n all_artifacts = [\n all_fname,\n setup_fname,\n bench_fname,\n all_fname_compressed,\n setup_fname_compressed,\n bench_fname_compressed,\n benchmark_config_file,\n ]\n for artifact in all_artifacts:\n remove_file_if_exists(artifact)\n\n seed = args.seed\n update_ratio = args.update_ratio\n read_ratio = 1 - update_ratio\n doc_limit = args.doc_limit\n total_benchmark_commands = args.total_benchmark_commands\n input_data_filename = args.input_data_filename\n used_indices = [indexname]\n setup_commands = []\n teardown_commands = []\n key_metrics = [\n {\n \"step\": \"benchmark\",\n \"metric-family\": \"throughput\",\n \"metric-json-path\": \"OverallRates.overallOpsRate\",\n \"metric-name\": \"Overall Updates and Aggregates query rate\",\n \"unit\": \"docs/sec\",\n \"metric-type\": \"numeric\",\n \"comparison\": \"higher-better\",\n \"per-step-comparison-metric-priority\": 1,\n },\n {\n \"step\": \"benchmark\",\n \"metric-family\": \"latency\",\n \"metric-json-path\": \"OverallQuantiles.allCommands.q50\",\n \"metric-name\": \"Overall Updates and Aggregates query q50 latency\",\n \"unit\": \"ms\",\n \"metric-type\": \"numeric\",\n \"comparison\": \"lower-better\",\n \"per-step-comparison-metric-priority\": 2,\n },\n {\n \"step\": \"benchmark\",\n \"metric-family\": \"throughput\",\n \"metric-json-path\": \"OverallRates.READ-R1Rate\",\n \"metric-name\": \"Overall Aggregates query rate\",\n \"unit\": \"docs/sec\",\n \"metric-type\": \"numeric\",\n \"comparison\": \"higher-better\",\n \"per-step-comparison-metric-priority\": None,\n },\n {\n \"step\": 
\"benchmark\",\n \"metric-family\": \"latency\",\n \"metric-json-path\": \"OverallQuantiles.READ-R1.q50\",\n \"metric-name\": \"Overall Aggregates query q50 latency\",\n \"unit\": \"ms\",\n \"metric-type\": \"numeric\",\n \"comparison\": \"lower-better\",\n \"per-step-comparison-metric-priority\": None,\n },\n {\n \"step\": \"benchmark\",\n \"metric-family\": \"throughput\",\n \"metric-json-path\": \"OverallRates.UPDATE-U1Rate\",\n \"metric-name\": \"Overall Updates query rate\",\n \"unit\": \"docs/sec\",\n \"metric-type\": \"numeric\",\n \"comparison\": \"higher-better\",\n \"per-step-comparison-metric-priority\": None,\n },\n {\n \"step\": \"benchmark\",\n \"metric-family\": \"latency\",\n \"metric-json-path\": \"OverallQuantiles.UPDATE-U1.q50\",\n \"metric-name\": \"Overall Updates query q50 latency\",\n \"unit\": \"ms\",\n \"metric-type\": \"numeric\",\n \"comparison\": \"lower-better\",\n \"per-step-comparison-metric-priority\": None,\n },\n {\n \"step\": \"setup\",\n \"metric-family\": \"throughput\",\n \"metric-json-path\": \"OverallRates.overallOpsRate\",\n \"metric-name\": \"Overall Ingestion speed\",\n \"unit\": \"docs/sec\",\n \"metric-type\": \"numeric\",\n \"comparison\": \"higher-better\",\n \"per-step-comparison-metric-priority\": 1,\n },\n {\n \"step\": \"setup\",\n \"metric-family\": \"latency\",\n \"metric-json-path\": \"OverallQuantiles.allCommands.q50\",\n \"metric-name\": \"Overall Ingestion q50 latency\",\n \"unit\": \"ms\",\n \"metric-type\": \"numeric\",\n \"comparison\": \"lower-better\",\n \"per-step-comparison-metric-priority\": 2,\n },\n ]\n total_writes = 0\n total_reads = 0\n total_updates = 0\n total_deletes = 0\n\n print(\"-- Benchmark: {} -- \".format(description))\n print(\"-- Description: {} -- \".format(description))\n\n countries_alpha_3 = args.countries_alpha3.split(\",\")\n countries_alpha_p = [float(x) for x in args.countries_alpha3_probability.split(\",\")]\n docs_map = {}\n nodes = {}\n skusIds = {}\n total_nodes = 0\n total_docs = 0\n\n countries_p_str = []\n for idx, country in enumerate(countries_alpha_3):\n countries_p_str.append(\"{} {}%\".format(country, countries_alpha_p[idx] * 100.0))\n print(\n \"Using {0} countries with the following probabilities {1}\".format(\n len(countries_alpha_3), \" \".join(countries_p_str)\n )\n )\n print(\"Using random seed {0}\".format(args.seed))\n random.seed(args.seed)\n\n generate_setup_commands()\n print(\"\\t saving to {} and {}\".format(setup_fname, all_fname))\n save_setup_csv_command_list()\n\n print(\"-- generating the ft.create commands -- \")\n ft_create_cmd = generate_ft_create_row(indexname, list(docs_map.values())[0])\n setup_commands.append(ft_create_cmd)\n\n print(\"-- generating the ft.drop commands -- \")\n ft_drop_cmd = generate_ft_drop_row(indexname)\n teardown_commands.append(ft_drop_cmd)\n\n generate_benchmark_commands()\n total_setup_commands = total_docs\n total_commands = total_setup_commands + total_benchmark_commands\n cmd_category_all = {\n \"setup-writes\": total_docs,\n \"writes\": total_writes,\n \"updates\": total_updates,\n \"reads\": total_reads,\n \"deletes\": total_deletes,\n }\n cmd_category_setup = {\n \"setup-writes\": total_docs,\n \"writes\": 0,\n \"updates\": 0,\n \"reads\": 0,\n \"deletes\": 0,\n }\n cmd_category_benchmark = {\n \"setup-writes\": 0,\n \"writes\": total_writes,\n \"updates\": total_updates,\n \"reads\": total_reads,\n \"deletes\": total_deletes,\n }\n\n status, uncompressed_size, compressed_size = compress_files(\n [all_fname], all_fname_compressed\n 
)\n inputs_entry_all = generate_inputs_dict_item(\n \"all\",\n all_fname,\n \"contains both setup and benchmark commands\",\n remote_url_all,\n uncompressed_size,\n all_fname_compressed,\n compressed_size,\n total_commands,\n cmd_category_all,\n )\n\n status, uncompressed_size, compressed_size = compress_files(\n [setup_fname], setup_fname_compressed\n )\n inputs_entry_setup = generate_inputs_dict_item(\n \"setup\",\n setup_fname,\n \"contains only the commands required to populate the dataset\",\n remote_url_setup,\n uncompressed_size,\n setup_fname_compressed,\n compressed_size,\n total_setup_commands,\n cmd_category_setup,\n )\n\n status, uncompressed_size, compressed_size = compress_files(\n [bench_fname], bench_fname_compressed\n )\n inputs_entry_benchmark = generate_inputs_dict_item(\n \"benchmark\",\n bench_fname,\n \"contains only the benchmark commands (required the dataset to have been previously populated)\",\n remote_url_bench,\n uncompressed_size,\n bench_fname_compressed,\n compressed_size,\n total_benchmark_commands,\n cmd_category_benchmark,\n )\n\n inputs = {\n \"all\": inputs_entry_all,\n \"setup\": inputs_entry_setup,\n \"benchmark\": inputs_entry_benchmark,\n }\n\n deployment_requirements = init_deployment_requirement()\n add_deployment_requirements_redis_server_module(\n deployment_requirements, \"search\", {}\n )\n add_deployment_requirements_utilities(\n deployment_requirements, \"ftsb_redisearch\", {}\n )\n add_deployment_requirements_benchmark_tool(\n deployment_requirements, \"ftsb_redisearch\"\n )\n\n run_stages = [\"setup\", \"benchmark\"]\n with open(benchmark_config_file, \"w\") as setupf:\n setup_json = generate_setup_json(\n json_version,\n project,\n use_case_specific_arguments,\n test_name,\n description,\n run_stages,\n deployment_requirements,\n key_metrics,\n inputs,\n setup_commands,\n teardown_commands,\n used_indices,\n total_commands,\n total_setup_commands,\n total_benchmark_commands,\n total_docs,\n total_writes,\n total_updates,\n total_reads,\n total_deletes,\n benchmark_repetitions_require_teardown_and_resetup,\n [\"setup\"],\n [\"benchmark\"],\n )\n json.dump(setup_json, setupf, indent=2)\n\n if args.upload_artifacts_s3:\n artifacts = [\n benchmark_config_file,\n all_fname_compressed,\n setup_fname_compressed,\n bench_fname_compressed,\n ]\n upload_dataset_artifacts_s3(s3_bucket_name, s3_bucket_path, artifacts)\n\n print(\"############################################\")\n print(\"All artifacts generated.\")\n","sub_path":"scripts/datagen_redisearch/ecommerce_aggregate/ftsb_generate_ecommerce_aggregate.py","file_name":"ftsb_generate_ecommerce_aggregate.py","file_ext":"py","file_size_in_byte":31907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"73559843","text":"import numpy as np\nimport cv2\nfrom PIL import Image\nfrom enum import Enum\nimport time\nimport urllib.request\nimport json\nimport socketio\nimport zbarlight\n\nWIDTH = 1920\nHEIGHT = 1080\nFPS = 60\nSEARCH_LINE = [250, 550, 850]\nSERVER_URL = 'http://localhost:3000'\nHTTP_HEADERS = {'Content-Type': 'application/json', 'charset': 'utf-8'}\nCOURSE_NUM = 1\n\nsio = socketio.Client()\nsio.connect(SERVER_URL)\n\n\nclass State(Enum):\n WAITING = 0\n REGISTERING = 1\n RACING = 2\n FINISHED = 3\n\n\ndef main():\n state = State.WAITING\n sio.emit('mymsg', 'python')\n\n # cap = cv2.VideoCapture('./miniyonku4.mp4')\n cap = cv2.VideoCapture(0)\n\n cap.set(cv2.CAP_PROP_FPS, FPS)\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, WIDTH)\n 
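# (editorial note) these cap.set() calls are requests only; OpenCV returns\n    # False and keeps the driver default if the camera cannot honour them\n    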
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, HEIGHT)\n\n players = [None, None, None]\n detected_players_array = [[], [], []]\n players_start_time = [None, None, None]\n players_time = [None, None, None]\n players_mask = [False, False, False]\n\n reg_flg = False\n race_id = None\n\n while True:\n ret, frame = cap.read()\n screen = frame.copy()\n key = cv2.waitKey(20) & 0xFF\n\n # 常に表示する項目\n result = [None, None, None]\n for i in range(3):\n result[i], detected_pos = read_barcode(frame, SEARCH_LINE[i])\n if detected_pos is not None:\n screen = cv2.line(screen, (detected_pos, SEARCH_LINE[i] + 150), (detected_pos, SEARCH_LINE[i] - 150), (255, 0, 0), 2)\n # 走査範囲を表す線を描画\n screen = cv2.line(screen, (0, SEARCH_LINE[i] - 150), (WIDTH, SEARCH_LINE[i] - 150), (0, 255, 0), 1)\n screen = cv2.line(screen, (0, SEARCH_LINE[i]), (WIDTH, SEARCH_LINE[i]), (0, 0, 255), 1)\n screen = cv2.line(screen, (0, SEARCH_LINE[i] + 150), (WIDTH, SEARCH_LINE[i] + 150), (0, 255, 0), 1)\n # プレイヤーのバーコードを表示\n cv2.putText(screen, str(players[i]), (int(WIDTH/4*1), SEARCH_LINE[i]), cv2.FONT_HERSHEY_PLAIN, 2, (33, 33, 33), 2, cv2.LINE_AA)\n # 検出したバーコードを表示\n cv2.putText(screen, str(result[i]), (int(WIDTH/4*2), SEARCH_LINE[i]), cv2.FONT_HERSHEY_PLAIN, 2, (33, 33, 33), 2, cv2.LINE_AA)\n # タイムを表示\n if players_time[i] is None:\n cv2.putText(screen, str(players_time[i]), (int(WIDTH/4*3), SEARCH_LINE[i]), cv2.FONT_HERSHEY_PLAIN, 2, (33, 33, 33), 2, cv2.LINE_AA)\n else:\n cv2.putText(screen, str(players_time[i]), (int(WIDTH/4*3), SEARCH_LINE[i]), cv2.FONT_HERSHEY_PLAIN, 2, (54, 67, 244), 2, cv2.LINE_AA)\n\n # ここからstate別処理\n # WAITING\n if state == State.WAITING:\n cv2.putText(screen, 'Waiting', (0, 50), cv2.FONT_HERSHEY_PLAIN, 4, (80, 175, 76), 4, cv2.LINE_AA)\n # 変数初期化\n players = [None, None, None]\n detected_players_array = [[], [], []]\n players_start_time = [None, None, None]\n players_time = [None, None, None]\n players_mask = [False, False, False]\n reg_flg = False\n race_id = None\n\n # REGISTERING\n elif state == State.REGISTERING:\n cv2.putText(screen, 'Registering', (0, 50), cv2.FONT_HERSHEY_PLAIN, 4, (0, 152, 255), 4, cv2.LINE_AA)\n # 何かしらかバーコードを検出\n if any(result):\n for i in range(3):\n if result[i] is not None:\n players[i] = result[i]\n players_mask[i] = True\n\n cv2.putText(screen, str(players) + 'OK?', (0, 100), cv2.FONT_HERSHEY_PLAIN, 2, (0, 152, 255), 2, cv2.LINE_AA)\n if reg_flg is True:\n cv2.putText(screen, 'DONE RACE_ID: ' + str(race_id), (0, 150), cv2.FONT_HERSHEY_PLAIN, 2, (0, 152, 255), 2, cv2.LINE_AA)\n cv2.putText(screen, 'PLAYERS MASK: ' + str(players_mask), (0, 200), cv2.FONT_HERSHEY_PLAIN, 2, (0, 152, 255), 2, cv2.LINE_AA)\n if key == ord('y'):\n players_data = []\n for i in range(3):\n if players[i] is not None:\n players_data.append(players[i])\n print('register')\n url = SERVER_URL + '/races'\n json_data = {\n 'course': COURSE_NUM,\n 'players': players_data\n }\n print(json.dumps(json_data).encode())\n req = urllib.request.Request(url, json.dumps(json_data).encode(), HTTP_HEADERS)\n with urllib.request.urlopen(req) as res:\n body = res.read()\n race_id = body.decode()\n reg_flg = True\n sio.emit('race_registered', race_id)\n\n # RACING\n elif state == State.RACING:\n cv2.putText(screen, 'Running', (0, 50), cv2.FONT_HERSHEY_PLAIN, 4, (54, 67, 244), 4, cv2.LINE_AA)\n\n for i in range(3):\n # 検出時\n if result[i] is not None:\n # 初回\n if len(detected_players_array[i]) == 0:\n sio.emit('race_started', {'course': COURSE_NUM, 'lane': i+1})\n\n detected_players_array[i].append(result[i])\n players_start_time[i] = 
time.time()\n # 2回目以降\n elif time.time() - players_start_time[i] > 1:\n detected_players_array[i].append(result[i])\n # ゴール時\n if detected_players_array[i][0] == detected_players_array[i][-1] and players_time[i] is None:\n players_time[i] = round(time.time() - players_start_time[i], 3)\n print(players_time[i])\n\n url = SERVER_URL + '/races/' + str(race_id)\n print(url)\n json_data = {\n 'barcode': result[i],\n 'raptime': int(players_time[i]*1000)\n }\n print(json.dumps(json_data).encode())\n req = urllib.request.Request(url, json.dumps(json_data).encode(), HTTP_HEADERS)\n with urllib.request.urlopen(req) as res:\n body = res.read()\n sio.emit('race_finished', {'course': COURSE_NUM, 'lane': i+1})\n\n # FINISHED\n elif state == State.FINISHED:\n sio.emit('race_end', race_id)\n for i in range(3):\n cv2.putText(screen, (str(players_time[i]) + ': OK'), (int(WIDTH / 4 * 3), SEARCH_LINE[i]), cv2.FONT_HERSHEY_PLAIN, 2, (54, 67, 244), 4, cv2.LINE_AA)\n\n state = State.WAITING\n\n # 描画\n cv2.imshow('screen', screen)\n\n # キー操作\n if key == 27: # Escape プログラム終了\n break\n\n elif key == ord(' '): # Space 次へ進む\n if state == State.WAITING:\n state = State.REGISTERING\n elif state == State.REGISTERING:\n state = State.RACING\n elif state == State.RACING:\n state = State.FINISHED\n\n elif state == State.RACING and key == ord('q'): # Q 試合やりなおし\n detected_players_array = [[], [], []]\n players_start_time = [None, None, None]\n players_time = [None, None, None]\n\n cap.release()\n cv2.destroyAllWindows()\n\n\ndef shear(image, deg):\n h, w = image.shape[:2]\n src = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0]], np.float32)\n dest = src.copy()\n dest[:, 0] += (deg / h * (h - src[:, 1])).astype(np.float32)\n affine = cv2.getAffineTransform(src, dest)\n return cv2.warpAffine(image, affine, (w, h))\n\n\ndef read_barcode(frame, y):\n search_line = (frame[y, :, 0] < 100) * (frame[y, :, 1] < 100) * (frame[y, :, 2] > 200)\n detected_pos = 0\n for i in reversed(range(200, WIDTH)):\n if search_line[i]:\n detected_pos = i\n break\n if detected_pos != 0:\n barcode = frame[y - 150:y + 150, detected_pos - 100:detected_pos]\n for deg in range(20, 60, 5):\n sheared = shear(barcode, deg)\n sheared_pil = Image.fromarray(cv2.cvtColor(sheared, cv2.COLOR_BGR2RGB))\n detected_barcode = zbarlight.scan_codes('upce', sheared_pil)\n\n if detected_barcode is not None:\n result = detected_barcode[0].decode('utf-8')[1:7]\n return result, detected_pos\n\n return None, detected_pos\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cam/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"291309749","text":"from django.shortcuts import render_to_response, get_object_or_404\nfrom django.template import RequestContext\nfrom django.core.paginator import Paginator, InvalidPage, EmptyPage\nfrom .models import Noticias\nfrom publicaciones.models import Publicacion\nfrom eventos.models import Evento\nfrom galerias.models import Galeria\n\n\ndef lista_noticias(request):\n publicaciones = Publicacion.objects.order_by('-id')[:4]\n eventos = Evento.objects.order_by('-id')[:4]\n noticia = Noticias.objects.order_by('-id')\n\n paginator = Paginator(noticia, 6)\n\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1\n\n try:\n objetos = paginator.page(page)\n except (EmptyPage, InvalidPage):\n objetos = paginator.page(paginator.num_pages)\n\n return render_to_response('noticias/lista-noticias.html', 
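# (editorial note) locals() hands every local name, including the\n                              # paginated objetos, straight to the template context\n                              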
locals(),\n context_instance=RequestContext(request))\n\ndef detalle_noticia(request, slug):\n detalle = get_object_or_404(Noticias, slug=slug)\n\n ultimas = Noticias.objects.all().exclude(id=detalle.id).order_by('-id')[:7]\n\n return render_to_response('noticias/detalle_noticia.html', {'detalle':detalle, 'ultimas':ultimas},\n context_instance=RequestContext(request))\n\ndef lista_galerias(request):\n\n eventos = Evento.objects.order_by('-id')[:4]\n noticia = Noticias.objects.order_by('-id')[:4]\n galerias = Galeria.objects.all()\n\n paginator = Paginator(galerias, 3)\n\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1\n\n try:\n objetos = paginator.page(page)\n except (EmptyPage, InvalidPage):\n objetos = paginator.page(paginator.num_pages)\n\n return render_to_response('galerias/galeria_list.html', locals(),\n context_instance=RequestContext(request))\n","sub_path":"noticias/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"346667780","text":"import os\nimport traceback\nimport pandas as pd\nfrom igf_data.illumina.basesMask import BasesMask\nfrom igf_data.utils.sequtils import rev_comp\nfrom igf_data.illumina.samplesheet import SampleSheet\nfrom igf_data.utils.fileutils import get_temp_dir,copy_local_file,check_file_path,remove_dir\nfrom igf_data.process.singlecell_seqrun.processsinglecellsamplesheet import ProcessSingleCellSamplesheet\nfrom igf_data.process.singlecell_seqrun.processsinglecellsamplesheet import ProcessSingleCellDualIndexSamplesheet\nfrom igf_data.process.seqrun_processing.find_and_process_new_seqrun import validate_samplesheet_for_seqrun\nfrom igf_data.process.seqrun_processing.find_and_process_new_seqrun import check_for_registered_project_and_sample\n\ndef get_formatted_samplesheet_per_lane(\n samplesheet_file, singlecell_barcode_json, singlecell_dual_barcode_json, runinfo_file, output_dir,\n platform, filter_lane=None, single_cell_tag='10X', index1_rule=None, index2_rule=None):\n \"\"\"\n A function for filtering and reformatting samplesheet files and splitting the data per lane\n\n :param samplesheet_file: Samplesheet file path\n :param singlecell_barcode_json: Singlecell barcode json path\n :param singlecell_dual_barcode_json: Single cell dual barcode json path\n :param runinfo_file: Path to RunInfo.xml file\n :param platform: Platform name for setting sc dual index workflow\n :param output_dir: Output dir path\n :param filter_lane: Lane to filter Samplesheeet data, default None\n :param single_cell_tag: Tag for singlecell samples, default 10X\n :param index1_rule: Rules for I7 index modification, default None, use REVCOMP\n :param index2_rule: Rules for I5 index modification, default None, use REVCOMP\n :returns: A list of dictionaries containing the following keys\n\n * lane_id\n * samplesheet_file\n * bases_mask\n\n \"\"\"\n try:\n tmp_dir = get_temp_dir()\n tmp_file = \\\n os.path.join(\n tmp_dir,\n os.path.basename(samplesheet_file))\n sc_dual_process = \\\n ProcessSingleCellDualIndexSamplesheet(\n samplesheet_file=samplesheet_file,\n singlecell_dual_index_barcode_json=singlecell_dual_barcode_json,\n platform=platform,\n index2_rule=index2_rule)\n sc_dual_process.\\\n modify_samplesheet_for_sc_dual_barcode(\n output_samplesheet=tmp_file)\n samplesheet_file = tmp_file\n tmp_dir = get_temp_dir()\n tmp_file = \\\n os.path.join(\n tmp_dir,\n os.path.basename(samplesheet_file))\n sc_data = \\\n 
ProcessSingleCellSamplesheet(\n samplesheet_file,\n singlecell_barcode_json,\n single_cell_tag)\n sc_data.\\\n change_singlecell_barcodes(tmp_file)\n sa = SampleSheet(tmp_file)\n if 'Lane' not in sa._data_header:\n raise ValueError(\n 'Lane not present in samplesheet {0}'.\\\n format(samplesheet_file))\n if filter_lane is not None and \\\n int(filter_lane) in range(1,9):\n sa = SampleSheet(tmp_file)\n sa.filter_sample_data(\n condition_key='Lane',\n condition_value=str(filter_lane))\n lanes = sa.get_lane_count()\n file_list = list()\n for lane_id in lanes:\n sa = SampleSheet(tmp_file)\n sa.filter_sample_data(\n condition_key='Lane',\n condition_value=str(lane_id))\n df = pd.DataFrame(sa._data)\n df['Lane'] = \\\n df['Lane'].astype(str)\n lane_df = df[df['Lane']==str(lane_id)].copy()\n if len(lane_df.index)==0:\n raise ValueError(\n 'No data present in samplesheet {0}, lane {1}'.\\\n format(samplesheet_file,lane_id))\n min_index1 = \\\n lane_df['index'].\\\n map(lambda x: len(x)).min()\n lane_df.loc[:,'index'] = \\\n lane_df['index'].\\\n map(lambda x: x[0:min_index1])\n if 'index2' in lane_df.columns:\n min_index2 = \\\n lane_df['index2'].\\\n map(lambda x: len(x)).min()\n lane_df.loc[:,'index2'] = \\\n lane_df['index2'].\\\n map(lambda x: x[0:min_index2])\n lane_df.loc[:,'c_index'] = \\\n lane_df['index']+lane_df['index2']\n else:\n lane_df.loc[:,'c_index'] = lane_df['index']\n lane_df.\\\n drop_duplicates('c_index',inplace=True)\n lane_df.drop('c_index',axis=1,inplace=True)\n filename = \\\n '{0}_{1}'.format(\n os.path.basename(samplesheet_file),\n lane_id)\n tmp_filepath = \\\n os.path.join(tmp_dir,filename)\n target_filepath = \\\n os.path.join(output_dir,filename)\n sa._data = \\\n lane_df.to_dict(orient='records')\n if index1_rule is not None and \\\n index1_rule=='REVCOMP':\n sa.get_reverse_complement_index('index')\n if index2_rule is not None and \\\n index2_rule=='REVCOMP':\n sa.get_reverse_complement_index('index2')\n sa.print_sampleSheet(tmp_filepath)\n copy_local_file(\n tmp_filepath,\n target_filepath)\n bases_mask_object = \\\n BasesMask(\n samplesheet_file=tmp_filepath,\n runinfo_file=runinfo_file,\n read_offset=1,\n index_offset=0)\n bases_mask_value = \\\n bases_mask_object.\\\n calculate_bases_mask()\n file_list.\\\n append({\n 'lane_id':lane_id,\n 'samplesheet_file':target_filepath,\n 'bases_mask':bases_mask_value})\n return file_list\n except Exception as e:\n traceback.print_exc()\n raise ValueError(\n 'Failed to format samplesheet, error: {0}'.\\\n format(e))\n\n\ndef samplesheet_validation_and_metadata_checking(\n samplesheet_file,schema_json_file,log_dir,seqrun_id,db_config_file):\n \"\"\"\n A function for samplesheet validation and metadata checking\n\n :param samplesheet_file: A Samplesheet file path\n :param schema_json_file: A JSON schema for samplesheet validation checking\n :param log_dir: Path for log dir\n :param seqrun_id: Sequencing run id\n :param db_config_file: DB config file\n ;returns: A list of error file paths\n \"\"\"\n try:\n tmp_dir = get_temp_dir()\n validation_output = list()\n _,error_file_list = \\\n validate_samplesheet_for_seqrun(\n seqrun_info={seqrun_id:os.path.dirname(samplesheet_file)},\n schema_json=schema_json_file,\n output_dir=tmp_dir,\n samplesheet_file=os.path.basename(samplesheet_file))\n if len(error_file_list.keys()) > 0:\n tmp_err_file = \\\n error_file_list.\\\n get(seqrun_id)\n if tmp_err_file is None or \\\n tmp_err_file == '':\n raise ValueError('No validation error file found')\n target_file = \\\n os.path.join(\n 
log_dir,\n os.path.basename(tmp_err_file))\n copy_local_file(\n tmp_err_file,\n target_file)\n validation_output.\\\n append(target_file)\n _,msg = \\\n check_for_registered_project_and_sample(\n seqrun_info={seqrun_id:os.path.dirname(samplesheet_file)},\n dbconfig=db_config_file,\n samplesheet_file=os.path.basename(samplesheet_file))\n if msg != '' and \\\n msg is not None:\n tmp_file = \\\n os.path.join(\n tmp_dir,\n '{0}_metadata_error.txt'.\\\n format(os.path.basename(samplesheet_file)))\n with open(tmp_file,'w') as fp:\n fp.write('{0}\\n'.format(msg))\n target_file = \\\n os.path.join(\n log_dir,\n os.path.basename(tmp_file))\n copy_local_file(\n tmp_file,\n target_file)\n validation_output.\\\n append(target_file)\n remove_dir(tmp_dir)\n return validation_output\n except Exception as e:\n raise ValueError(\n 'Failed samplesheet checking, error: {0}'.format(e))","sub_path":"igf_data/utils/samplesheet_utils.py","file_name":"samplesheet_utils.py","file_ext":"py","file_size_in_byte":7671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"555390142","text":"\"\"\"Problem: \n - Given a valid (IPv4) IP address, return a defanged version of that IP address.\n A defanged IP address replaces every period \".\" with \"[.]\".\n\n Example Input:\n - Input: address = \"1.1.1.1\"\n - Input: address = \"255.100.50.0\"\n\n Example Output:\n - Output: \"1[.]1[.]1[.]1\"\n - Output: \"255[.]100[.]50[.]0\"\n\n-1 Restate the problem\n - add brackets around the period in an ipv4 address i.e defang it\n-2 Ask clarifying questions\n - Can we use regex?\n\n-3 State your assumptions\n - The given address is a valid IPv4 address.\n\n-4 Think out loud\n -4a Brainstorm solutions\n - use regex to substitue/replace any \\. with [.]\n -4b Explain your rationale\n - regex makes it simple and more powerful\n - (we need to use \\ to escape the . or it would just do it for any/all characters)\n -4c Discuss tradeoffs\n -\n -4d Suggest improvements\n -\n\"\"\"\n\n\"\"\"Pseudo Approach\n - split the ip/string into characters, loop through them looking for a . 
and add [] to it then rejoining the string/ip and return it\n\n    Edge Cases:\n        -\n\n    Complexity Check:\n     After implementing some code go back through and re-evaluate its time and/or space complexity -- refactor/improve/find more edge cases -- repeat\n\"\"\"\n\nfrom re import sub\n\nclass Solution:\n    def defangIPaddr(self, address: str) -> str:\n        address = sub(\"\\.\", \"[.]\", address)\n        return address\n\nif __name__ == \"__main__\": # RunTime:\n    s = Solution()\n    param = \"1.1.1.1\"\n    function_output = s.defangIPaddr(param)\n    print(param)\n    print(function_output)\n","sub_path":"HW7/defangIPaddr.py","file_name":"defangIPaddr.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"308601972","text":"\"\"\"\ngRPC server wrapper\n\"\"\"\nimport time\nfrom concurrent import futures\n\nimport grpc\n\nfrom oslo_config import cfg\n\nfrom cloudkeeper_os.grpc.cloudkeeper_grpc_python import cloudkeeper_pb2_grpc\nfrom cloudkeeper_os.grpc.core_connector import CoreConnector\n\n\nCONF = cfg.CONF\nONE_DAY_IN_SECONDS = 60 * 60 * 24\nGRACE_PERIOD = 5\n\n\ndef serve():\n    \"\"\"\n    Configure and start gRPC server\n    \"\"\"\n    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n    cloudkeeper_pb2_grpc.add_CommunicatorServicer_to_server(CoreConnector(), server)\n    server.add_secure_port(CONF.connection.listen_address, _credentials())\n    server.start()\n    try:\n        while True:\n            time.sleep(ONE_DAY_IN_SECONDS)\n    except (KeyboardInterrupt, SystemExit):\n        server.stop(GRACE_PERIOD)\n\n\ndef _credentials():\n    if not CONF.connection.authentication:\n        return grpc.ServerCredentials(None)\n\n    with open(CONF.connection.key, \"rb\") as key_file:\n        key = key_file.read()\n\n    with open(CONF.connection.certificate, \"rb\") as cert_file:\n        certificate = cert_file.read()\n\n    with open(CONF.connection.core_certificate, \"rb\") as core_cert_file:\n        core_certificate = core_cert_file.read()\n\n    # ssl_server_credentials expects an iterable of (key, cert) pairs\n    return grpc.ssl_server_credentials([(key, certificate)], core_certificate, True)\n","sub_path":"cloudkeeper_os/grpc/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"452666489","text":"\"\"\"\nPrefix-sum computation\n\"\"\"\n\ndef cal_sum_of_array(nums, k):\n    \"\"\"\n    Classic approach:\n    preSum[i] is the sum of nums[0..i-1].\n    To get the sum of nums[i..j], the single operation preSum[j+1]-preSum[i] suffices, with no need to traverse the array again.\n\n    Time complexity: O(N^2)\n    Space complexity: O(N)\n    \"\"\"\n    n = len(nums)\n    # prefix-sum list\n    presum = [0]\n\n    # compute the prefix sums\n    for i in range(n):\n        presum.append(presum[i]+nums[i])\n\n    # count subarrays whose sum equals k\n    ans = 0\n    for i in range(1, n+1):\n        for j in range(0, i):\n            if presum[i] - presum[j] == k:\n                ans += 1\n    return ans\n\nnums = [1, 2, 3, 2]\ncal_sum_of_array(nums, 7)","sub_path":"13Optimized/prefix/presum_ordinary.py","file_name":"presum_ordinary.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"226344213","text":"\"\"\"The program WindChill that takes two double command-line arguments t\nand v and prints the wind chill using given formula.\n\n@author Amit Kumar\n@version 1.0\n@since 01/01/2019\n\"\"\"\n\n# importing important modules\nimport sys\n\n\ndef wind_chill_prob(t, v):\n    w = 0.0\n    if (t <= 50) and (v <= 120) and (v >= 3):  # formula is valid only for t <= 50 and 3 <= v <= 120\n        w = 35.74+(0.6215*t)+(((0.4275*t)-35.75) * (v ** 0.16))\n    else:\n        print(\"Value of 't' should not exceed more than 50\\nValue of 'v' should not 
exceed more than 120 or less \"\n \"than 3\t\")\n return w\n\n\nglobal t1, v1\n\ntry:\n t1 = int(sys.argv[1]) # getting value of t from command-line argument from user\n v1 = int(sys.argv[2]) # getting value of v from command-line argument from user\nexcept Exception as e:\n print(e)\n\nres = wind_chill_prob(t1, v1)\nprint(\"Effective temperature (the wind chill): \", res)\n","sub_path":"FunctionalPrograms/Problem16_WindChill.py","file_name":"Problem16_WindChill.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"137771777","text":"import cv2\nimport numpy as np\nimport sys\nimport serial\nimport struct\nimport time\nimport math\nfrom imutils.video.pivideostream import PiVideoStream\nfrom imutils.video import FPS\n\nimport imutils\nfrom simple_pid import PID\nkp =.2\nki =0\nkd =0\npid = PID(kp, ki, kd, setpoint=0,output_limits=(-25, 25))\nlist=[]\ndef most_frequent(List):\n counter = 0\n num = List[0]\n\n for i in List:\n curr_frequency = List.count(i)\n if(curr_frequency> counter):\n counter = curr_frequency\n num = i\n\n return num\nupper1 = np.array([ 10, 298, 192])\nlower1 = np.array([-10, 87, 12])\nupper2 = np.array([185, 271, 202])\nlower2 = np.array([165, 141, 22])\nsrc = np.array([[27, 425], [600, 428], [525, 5], [126, 6]], dtype=\"float32\")\n\ndst = np.array([[100, 480], [540, 480], [540, 0], [100, 0]], dtype=\"float32\")\nM = cv2.getPerspectiveTransform(src, dst)\nimage_hsv = None\npixel = (0,0,0) #RANDOM DEFAULT VALUE\ntry:\n arduino=serial.Serial('/dev/ttyUSB0',baudrate=9600, timeout = 3.0)\nexcept:\n arduino=serial.Serial('/dev/ttyUSB1',baudrate=9600, timeout = 3.0)\n\n#video = cv2.VideoCapture(0)\nvideo = PiVideoStream(resolution=(640,480),framerate=60).start()\ntime.sleep(2.0)\ntimeCheck = time.time()\nwhile True:\n image_src = video.read()\n image_src = cv2.flip( image_src, -1)\n ################# color transformation and filter -------> binary image\n image_hsv = cv2.cvtColor(image_src,cv2.COLOR_BGR2HSV)\n image_mask = cv2.inRange(image_hsv,lower1,upper1) + cv2.inRange(image_hsv,lower2,upper2)\n\n ############################ warp tranform ------------> birds eye view\n warped = cv2.warpPerspective(image_mask, M, (640,480))\n \n #cv2.imshow(\"aaa\", warped)\n #cv2.waitKey(1)\n ############ for line detection\n edges = cv2.Canny(warped, 30,50)\n section= 200\n roi = edges[section:,:]\n lines = cv2.HoughLinesP(roi, 1, np.pi/180, 30, minLineLength= 30,maxLineGap=20,)\n try:\n for x in range(0, len(lines)):\n for x1,y1,x2,y2 in lines[x]:\n angle = np.arctan2(y1 - y2, x1 - x2) *(180/ np.pi)\n if ((90< angle <105) or (-105< angle < -90)):\n #derecho\n list.append(0)\n elif(-105 >= angle):\n #izquierda\n list.append(1)\n elif(angle >= 105):\n #derecha\n list.append(2)\n\n action =most_frequent(list)\n except:\n action = 3\n\n #slope =np.append(m)\n #cv2.line(warped1,(x1,y1),(x2,y2),(0,255,0),2)\n #############probabilities of what action it is //////straight: action =0 | left: action = 1 | right: action = 2\n ################################################## actions are based if there is a straight line or curves\n ################################################## straight: action =0 | left: action = 1 | right: action = 2\n #action =0\n if(action== 0):\n ################## peak histogram (where the lines start in the bottom)\n histogram = np.sum(warped[350:,:],axis =0)\n leftp= np.argmax(histogram[:320])\n rightp= np.argmax(histogram[320:])+320\n\n ### find bad values (true if 
there is a missing peak/line) and add a tolerance\n lbad = True if((leftp == 0) or (histogram[leftp] < 2000)) else False\n rbad = True if((rightp == 320) or (histogram[rightp] < 2000)) else False\n\n ################## ERROR from peaks leftbound=165 rightbound=440\n ###### servo move 70izq 105straight 140right\n leftbound = 60\n rightbound = 575\n error=0\n count=0\n if(not lbad):\n error =error +(leftp-leftbound)\n count += 1\n if(not rbad):\n error = error+(rightp-rightbound)\n count += 1\n try:\n error= error/count\n\n pidout=pid(error)\n pidout = pidout* -1\n servo= pidout+105\n #print(error)\n arduino.write(struct.pack('>B',int(servo)))\n print(int(servo))\n except:\n action=3\n elif(action == 1):\n arduino.write(struct.pack('>B',70))\n\n elif(action == 2):\n arduino.write(struct.pack('>B',140))\n elif(action == 3):\n a=1\n list.clear()\n\n ############### print parameters\n #----number of lines\n #print(len(lines))\n\n #----- peaks\n #print(leftp, rightp)\n #----- bad or good peaks\n #print(lbad,rbad)\n #----- line error (left = negative right = positive)\n #print(error)\n ############## matplotlib show\n #a = plt.subplot(2,1,1)\n #plt.imshow(warped)\n #plt.subplot(2,1,2, sharex= a)\n #plt.plot(histogram)\n\n ###############opencv show\n #cv2.imshow(\"edges\",edges)\n #cv2.imshow(\"aaa\", warped1)\n #cv2.waitKey(20)\n #cv2.imshow(\"warpedmask\", warped)\n #cv2.imshow(\"HSV\",image_hsv)\n #cv2.imshow(\"a\",image_src)\n \n #print(1/(time.time() - timeCheck))\n \n\t\n\n\n #plt.show()\n\n\n\nif __name__=='__main__':\n main()\n","sub_path":"lane-follower/carf.py","file_name":"carf.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"124501778","text":"from io import BytesIO\nfrom PIL import Image\n\n\ndef add_watermark(img_data, mask_file):\n \"\"\"添加图像水印\"\"\"\n img = Image.open(BytesIO(img_data))\n watermark = Image.open(mask_file)\n left = img.size[0] - watermark.size[0]\n top = img.size[1] - watermark.size[1]\n img.paste(img, (0, 0))\n img.paste(watermark, (left, top), mask=watermark)\n output = BytesIO()\n img.save(output, format='PNG')\n return output.getvalue()\n","sub_path":"src/website/utils/graphics.py","file_name":"graphics.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"283745988","text":"#! 
/usr/bin/env python3\n\nfrom agent import RandomAgent, GreedyAgent, EpsilonGreedyAgent\nfrom bandit import Bandit\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nNUM_ARMS = 10\nNUM_GAMES = 2000\nNUM_STEPS = 1000\n\n\ndef simulate_random_agent(games):\n data = {\n \"actions\": [],\n \"rewards\": np.zeros(NUM_STEPS)\n }\n for g in tqdm(range(NUM_GAMES), desc=\"Random Agent\"):\n agent = RandomAgent(NUM_ARMS, NUM_STEPS)\n game = games[g]\n\n actions, rewards = agent.play(game)\n\n data[\"actions\"].extend(actions)\n data[\"rewards\"] += rewards\n\n # Convert sum to average reward per step.\n data[\"rewards\"] /= NUM_GAMES\n\n return data\n\ndef simulate_greedy_agent(games):\n data = {\n \"actions\": [],\n \"rewards\": np.zeros(NUM_STEPS)\n }\n for g in tqdm(range(NUM_GAMES), desc=\"Greedy Agent\"):\n agent = GreedyAgent(NUM_ARMS, NUM_STEPS)\n game = games[g]\n\n actions, rewards = agent.play(game)\n\n data[\"actions\"].extend(actions)\n data[\"rewards\"] += rewards\n\n # Convert sum to average reward per step.\n data[\"rewards\"] /= NUM_GAMES\n\n return data\n\ndef simulate_epsilon_greedy_agent(games, epsilon):\n data = {\n \"actions\": [],\n \"rewards\": np.zeros(NUM_STEPS)\n }\n for g in tqdm(range(NUM_GAMES), desc=f\"Epsilon Greedy Agent ({epsilon})\"):\n agent = EpsilonGreedyAgent(epsilon, NUM_ARMS, NUM_STEPS)\n game = games[g]\n\n actions, rewards = agent.play(game)\n\n data[\"actions\"].extend(actions)\n data[\"rewards\"] += rewards\n\n # Convert sum to average reward per step.\n data[\"rewards\"] /= NUM_GAMES\n\n return data\n\n\nif __name__ == \"__main__\":\n games = [Bandit(NUM_ARMS) for _ in range(NUM_GAMES)]\n reward_distribution = [[] for _ in range(NUM_ARMS)]\n\n for game in games:\n rewards = game.get_rewards()\n for i in range(NUM_ARMS):\n reward_distribution[i].append(rewards[i])\n\n\n plt.violinplot(reward_distribution, range(NUM_ARMS), showmeans=True)\n plt.xticks(range(NUM_ARMS))\n plt.xlabel(\"Actions\")\n plt.ylabel(\"Reward Distribution\")\n plt.show()\n\n r_data = simulate_random_agent(games)\n g_data = simulate_greedy_agent(games)\n e_data_1 = simulate_epsilon_greedy_agent(games, 0.1)\n e_data_2 = simulate_epsilon_greedy_agent(games, 0.01)\n\n timesteps = range(NUM_STEPS)\n\n plt.plot(timesteps, r_data[\"rewards\"], color=\"black\", linewidth=0.5)\n plt.plot(timesteps, g_data[\"rewards\"], color=\"green\", linewidth=0.5)\n plt.plot(timesteps, e_data_1[\"rewards\"], color=\"blue\", linewidth=0.5)\n plt.plot(timesteps, e_data_2[\"rewards\"], color=\"red\", linewidth=0.5)\n\n plt.ylim(bottom=0)\n plt.legend([\"Random\", \"Greedy\", \"Epsilon Greedy (0.1)\", \"Epsilon Greedy (0.01)\"])\n plt.xlabel(\"Timesteps\")\n plt.ylabel(\"Average Reward\")\n\n plt.show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"469716334","text":"# -*- coding:utf-8 -*-\n\nimport re\nimport string\nimport time,sys\n# reload(sys)\n# sys.setdefaultencoding('utf8')\n\n\nclass Parse():\n def __init__(self):\n self.source=None\n self.printable = set(string.printable)\n\n def parse(self,source):\n '''\n '''\n self.source=source\n ########remove unicodes and non printable characters\n self.source = re.sub(r'[^\\x00-\\x7F]',' ',self.source)\n ######## removing all non printable characters\n self.source = ''.join(filter(lambda x:x in self.printable, self.source))\n ####### remove white space from the beginning of the line\n 
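        # [Editor's aside - a hypothetical sketch, not part of the original file.
        # Each re.sub call below rescans self.source once per pattern. The same
        # cleanup style can be written as one loop over patterns compiled a
        # single time; the rule list here is illustrative, not the original set:]
        _sketch_rules = [
            (re.compile(r'(^\s*\d+\.\r*\n)', re.IGNORECASE | re.MULTILINE), ' '),  # numbered lines
            (re.compile(r'(^ +| +$)', re.MULTILINE), ' '),                         # leading/trailing spaces
            (re.compile(r' +'), ' '),                                              # collapse runs of spaces
        ]
        for _pat, _repl in _sketch_rules:
            self.source = _pat.sub(_repl, self.source)
        # [End of aside; the original substitutions follow.]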
self.source= re.sub(r'^( +)',r' ',self.source,flags=re.IGNORECASE|re.MULTILINE)\n ####### remove lines containing number followed by dot(.) that appearing in the text\n self.source = re.sub(r'(^\\s*\\d+.\\r*\\n)|^( +)',r' ',self.source,flags=re.IGNORECASE|re.MULTILINE)\n ###### removing one or more whitespaces from the begining and end of each line\n self.source = re.sub(r'(^( +)|( +)$)',r' ',self.source,flags=re.IGNORECASE|re.MULTILINE)\n ###### removing all jpeg embedded in document\n self.source = re.sub(r'((\\!\\[\\]\\(data:image\\/).*\\r*\\n)',r' ',self.source,flags=re.IGNORECASE|re.MULTILINE)\n ###### remove any blank images embedded in document\n self.source = re.sub(r'((\\!\\[\\]\\(data:\\)))',r' ',self.source,flags=re.IGNORECASE|re.MULTILINE)\n ###### remove starting digits and '#' symbols\n # self.source = re.sub(r'(^\\s*[^a-zA-Z]+\\s*)',r' ',self.source,flags=re.IGNORECASE|re.MULTILINE)\n ###### remove one or more whitespaces from the beginning and end of each line\n self.source = re.sub(r'(^( +)|( +)$)',r' ',self.source,flags=re.IGNORECASE|re.MULTILINE)\n ###### remove multiple new lines\n self.source = re.sub(r'(\\n+)',r'\\n',self.source,flags=re.IGNORECASE|re.MULTILINE)\n ############ remove whitespace with single space\n self.source = re.sub(' +',' ',self.source,flags=re.IGNORECASE|re.MULTILINE)\n ######## remove line containg numbers followed by dot(.)\n self.source = re.sub(r'(^\\s*)(\\d+.)(\\r*\\n)',r' ',self.source,flags=re.IGNORECASE|re.MULTILINE)\n self.source = re.sub(r'"',r' ',self.source,flags=re.IGNORECASE|re.MULTILINE)\n self.source = re.sub(r'x22;', r' ', self.source, flags=re.IGNORECASE | re.MULTILINE)\n self.source = re.sub(r'"', r' ', self.source, flags=re.IGNORECASE | re.MULTILINE)\n self.source = re.sub(r''', r' ', self.source, flags=re.IGNORECASE | re.MULTILINE)\n self.source = re.sub(r'x27;', r' ', self.source, flags=re.IGNORECASE | re.MULTILINE)\n self.source = re.sub(r'\\\\\\\\', r' ', self.source, flags=re.IGNORECASE | re.MULTILINE)\n self.source = re.sub(r'//', r' ', self.source, flags=re.IGNORECASE | re.MULTILINE)\n junk_list = ['xa4', 'xa4', 'xa7', 'xe6', 'xa9', 'xa6', 'xa8', 'u201a', 'u02dc', 'xbf', 'xba', 'u02dc', 'u2014',\n 'u2026', 'u2020', 'xbc', 'xef', 'u2022', 'u2122', 'xe5', 'xbd', 'x9d', 'xe7', 'xe2', 'xb8', 'xe8',\n 'u017d', 'xb4', 'xae', 'x90', 'xbb', 'xa5', 'u0192', 'xa0', 'u2018', 'xe9', 'u201c', 'xb5',\n 'u2013', 'xb6', 'u203a', 'u20ac', 'xb7', 'u0153', 'xb0', 'u201e', 'xc3', 'xaf', 'xa1', 'xe4',\n 'xad', 'xa2', 'xc2', 'xb9', 'u2026 - ']\n for j in junk_list:\n self.source = re.sub(j, r' ', self.source, flags=re.IGNORECASE | re.MULTILINE)\n self.source = re.sub(j, r' ', self.source, flags=re.IGNORECASE | re.MULTILINE)\n\n return self.source\n\n def getdata(self):\n return self.source\n\n\n# if __name__=='__main__':\n# filepath = r\"jobs_monster_skills\"\n# obj = Parse()\n# data = obj.parse(\"Archangel School, Kullu, Himachal Pradesh A rch a ngel School All rights reserved © Archangel School, Ragunathpur, Kullu (Himachal Pradesh) For God and Country Contact Us: School’s Postal Address: Archangel School, Raghunathpur, KULLU Himachal Pradesh (India) Pic Code: 175101 Phone: 01902-225566 Email: archangelschool (a) outlook.in Quick Links: Activities Photo Gallery Chairman's Message Principal's Message Vision and Mission School History Management and Teachers Location of School\".encode(\"ascii\",'ignore'))\n# print (data)\n # import ast\n # data=ast.literal_eval(open(filepath,\"r\").read())\n # newlist=[]\n # for d in data:\n # 
newlist.append(obj.parse(d))\n # with open(\"jobs_monster_skills3\", \"w\")as fileobj:\n # fileobj.write(str(newlist))\n\n# import spacy, random\n#\n# nlp = spacy.load(\"en_core_web_lg\")\n# doc = nlp(u\"SOFTWARE ENGINEER 500\")\n# print doc.ents\n# for k in doc.ents:\n# print k.text, k.label_, k.start_char, k.end_char","sub_path":"PDFExtractionNew/src/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"93250233","text":"from life_log.lifelogger import utils as utils\nfrom life_log.models import Event\nfrom django.contrib.auth.models import User\n\nraw1 = utils.converter('/webapps/thor/thor_app/assets/lanahanj_data.csv')\nraw2 = utils.converter('/webapps/thor/thor_app/assets/lanahanj_data2.csv')\n\nraws = raw1 + raw2\n\nevents = []\nuser = User.objects.get(username='thor')\nfor r in raws:\n events += [Event(user=user, raw=r)]\n events[-1].full_clean()\n events[-1].save()\n","sub_path":"data_dump.py","file_name":"data_dump.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"574858048","text":"#CREATE ECDF\ndef ecdf(data):\n \"\"\"Compute ECDF for a one-dimensional array of measurements.\"\"\"\n # Number of data points: n\n n = len(data)\n\n # x-data for the ECDF: x\n x = np.sort(data)\n\n # y-data for the ECDF: y\n y = np.arange(1, n+1) / n\n\n return x, y\n# Compute ECDF for versicolor data: x_vers, y_vers\nx_vers, y_vers = ecdf(versicolor_petal_length)\n\n# Generate plot\nplt.plot(x_vers, y_vers, marker=\".\", linestyle=\"none\")\n\n# Label the axes\nplt.ylabel(\"ECDF\")\nplt.xlabel(\"Length\")\n\n\n# Display the plot\nplt.show()\n\n#PETAL LENGTH ECDF GRAPHS\n# Compute ECDFs\nx_set, y_set = ecdf(setosa_petal_length)\nx_vers, y_vers = ecdf(versicolor_petal_length)\nx_virg, y_virg = ecdf(virginica_petal_length)\n\n\n# Plot all ECDFs on the same plot\nplt.plot(x_set, y_set, marker=\".\", linestyle = \"none\")\nplt.plot(x_vers, y_vers, marker=\".\", linestyle = \"none\")\nplt.plot(x_virg, y_virg, marker=\".\", linestyle = \"none\")\n\n\n# Annotate the plot\nplt.legend(('setosa', 'versicolor', 'virginica'), loc='lower right')\n_ = plt.xlabel('petal length (cm)')\n_ = plt.ylabel('ECDF')\n\n# Display the plot\nplt.show()\n\n#PERCENTILES AND BOX PLOTS\n# Specify array of percentiles: percentiles\npercentiles = np.array([2.5, 25, 50, 75, 97.5])\n\n# Compute percentiles: ptiles_vers\nptiles_vers = np.percentile(versicolor_petal_length, percentiles)\n\n# Print the result\nprint(ptiles_vers)\n\n#ADDING PERCENTILES TO ECDF GRAPH\n# Plot the ECDF\n_ = plt.plot(x_vers, y_vers, '.')\n_ = plt.xlabel('petal length (cm)')\n_ = plt.ylabel('ECDF')\n\n# Overlay percentiles as red diamonds.\n_ = plt.plot(ptiles_vers, percentiles/100, marker='D', color='red',\n linestyle='none')\n\n# Show the plot\nplt.show()\n\n#BOXPLOT\n# Create box plot with Seaborn's default settings\n_ = sns.boxplot(x='species', y='petal length (cm)', data= df)\n\n# Label the axes\n_= plt.xlabel('species')\n_= plt.ylabel('petal length (cm)')\n\n\n# Show the plot\n\nplt.show()\n\n#VARIANCE AND STDEV\n# Array of differences to mean: differences\ndifferences = versicolor_petal_length - np.mean(versicolor_petal_length)\n\n# Square the differences: diff_sq\ndiff_sq = differences**2\n\n# Compute the mean square difference: variance_explicit\nvariance_explicit = np.mean(diff_sq)\n\n# Compute the variance using NumPy: 
variance_np\nvariance_np = np.var(versicolor_petal_length)\n\n# Print the results\nprint(variance_explicit, variance_np)\n\n#STANDARD DEVIATION\n# Compute the variance: variance\nvariance = np.var(versicolor_petal_length)\n\n# Print the square root of the variance\nprint(np.sqrt(variance))\n\n# Print the standard deviation\nprint(np.std(versicolor_petal_length))\n\n#COVARIANCE\n# Compute the covariance matrix: covariance_matrix\ncovariance_matrix = np.cov(versicolor_petal_length, versicolor_petal_width)\n\n# Print covariance matrix\nprint(covariance_matrix)\n\n# Extract covariance of length and width of petals: petal_cov\npetal_cov = covariance_matrix[0,1]\n\n# Print the length/width covariance\nprint(covariance_matrix[1,0])\n\n#PEARSON COEFFICIENT\ndef pearson_r(x, y):\n \"\"\"Compute Pearson correlation coefficient between two arrays.\"\"\"\n # Compute correlation matrix: corr_mat\n corr_mat = np.corrcoef(x,y)\n\n # Return entry [0,1]\n return corr_mat[0,1]\n\n# Compute Pearson correlation coefficient for I. versicolor: r\nr = pearson_r(versicolor_petal_length, versicolor_petal_width)\n\n# Print the result\nprint(r)\n\n\n#PROBABILITY\n# Seed the random number generator\nnp.random.seed(42)\n\n# Initialize random numbers: random_numbers\nrandom_numbers = np.empty(100000)\n\n# Generate random numbers by looping over range(100000)\nfor i in range(100000):\n random_numbers[i] = np.random.random()\n\n# Plot a histogram\n_ = plt.hist(random_numbers)\n\n# Show the plot\nplt.show()\n\ndef perform_bernoulli_trials(n, p):\n \"\"\"Perform n Bernoulli trials with success probability p\n and return number of successes.\"\"\"\n # Initialize number of successes: n_success\n n_success = 0\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n # If less than p, it's a success so add one to n_success\n if random_number < p:\n n_success += 1\n return n_success\n\n#BERNOUILLI TRIAL\n# Seed random number generator\nnp.random.seed(42)\n\n# Initialize the number of defaults: n_defaults\nn_defaults = np.empty(1000)\n\n# Compute the number of defaults\nfor i in range(1000):\n n_defaults[i] = perform_bernoulli_trials(100, 0.05)\n\n\n# Plot the histogram with default number of bins; label your axes\n_ = plt.hist(n_defaults, normed=True)\n_ = plt.xlabel('number of defaults out of 100 loans')\n_ = plt.ylabel('probability')\n\n# Show the plot\nplt.show()\n\n#BERNOUILLI\n# Compute ECDF: x, y\nx,y = ecdf(n_defaults)\n\n# Plot the ECDF with labeled axes\nplt.plot(x, y, marker= '.', linestyle='none')\nplt.xlabel('Probability')\nplt.ylabel('No. Defaults')\n\n\n# Show the plot\nplt.show()\n\n# Compute the number of 100-loan simulations with 10 or more defaults: n_lose_money\nn_lose_money = np.sum(n_defaults >= 10)\n\n\n# Compute and print probability of losing money\nprint('Probability of losing money =', n_lose_money / len(n_defaults))\n\n#PLOTTING BINOMIAL DISTRIBUTION\n# Take 10,000 samples out of the binomial distribution: n_defaults\nn_defaults = np.random.binomial(100, 0.05, size= 10000)\n\n# Compute CDF: x, y\nx,y = ecdf(n_defaults)\n\n# Plot the CDF with axis labels\nplt.plot(x,y, marker=\".\", linestyle=\"none\")\nplt.xlabel('No. of Defaults')\nplt.ylabel('CDF')\nplt.show()\n\n#BINOMIAL DISTRIBUTION PLOTTING HISTOGRAMS\n# Compute bin edges: bins\nbins = np.arange(0, max(n_defaults) + 1.5) - 0.5\n\nplt.hist(n_defaults, normed=True, bins=bins)\n\n# Label axes\nplt.xlabel('No. 
Defaults')\nplt.ylabel('probability')\n\n# Show the plot\nplt.show()\n\n#POISSON DISTRIBUTION\n# Draw 10,000 samples out of Poisson distribution: samples_poisson\nsamples_poisson = np.random.poisson(10, size=10000)\n\n# Print the mean and standard deviation\nprint('Poisson: ', np.mean(samples_poisson),\n      np.std(samples_poisson))\n\n# Specify values of n and p to consider for Binomial: n, p\nn = [20, 100, 1000]\np = [0.5, 0.1, 0.01]\n\n# Draw 10,000 samples for each n,p pair: samples_binomial\nfor i in range(3):\n    samples_binomial = np.random.binomial(n[i], p[i], size=10000)\n\n    # Print results\n    print('n =', n[i], 'Binom:', np.mean(samples_binomial),\n          np.std(samples_binomial))\n#POISSON PROBABILITY\n# Draw 10,000 samples out of Poisson distribution: n_nohitters\nn_nohitters = np.random.poisson(251/115, size = 10000)\n\n# Compute number of samples that are seven or greater: n_large\nn_large = np.sum(n_nohitters >=7)\n\n# Compute probability of getting seven or more: p_large\np_large = n_large/10000\n\n# Print the result\nprint('Probability of seven or more no-hitters:', p_large)\n\n\n#NORMAL DISTRIBUTIONS\n# Draw 100000 samples from Normal distribution with stds of interest: samples_std1, samples_std3, samples_std10\nsamples_std1 = np.random.normal(20,1, size=100000)\nsamples_std3 = np.random.normal(20,3, size=100000)\nsamples_std10 = np.random.normal(20,10, size=100000)\n\n\n# Make histograms\nplt.hist(samples_std1, histtype=\"step\", normed=True, bins=100)\nplt.hist(samples_std3, histtype=\"step\", normed=True, bins=100)\nplt.hist(samples_std10, histtype=\"step\", normed=True, bins=100)\n\n\n# Make a legend, set limits and show plot\n_ = plt.legend(('std = 1', 'std = 3', 'std = 10'))\nplt.ylim(-0.01, 0.42)\nplt.show()\n\n#ECDF AND NORMAL DIST\n# Compute mean and standard deviation: mu, sigma\nmu = np.mean(belmont_no_outliers)\nsigma = np.std(belmont_no_outliers)\n\n# Sample out of a normal distribution with this mu and sigma: samples\nsamples = np.random.normal(mu, sigma, size=10000)\n\n# Get the CDF of the samples and of the data\nx, y = ecdf(belmont_no_outliers)\nx_theor, y_theor = ecdf(samples)\n\n\n# Plot the CDFs and show the plot\n_ = plt.plot(x_theor, y_theor)\n_ = plt.plot(x, y, marker='.', linestyle='none')\n_ = plt.xlabel('Belmont winning time (sec.)')\n_ = plt.ylabel('CDF')\nplt.show()\n\n\n#PART TWO\n# Take a million samples out of the Normal distribution: samples\nsamples=np.random.normal(mu, sigma, size=1000000)\n\n# Compute the fraction that are faster than 144 seconds: prob\nprob = np.sum(samples <= 144)/1000000\n\n# Print the result\nprint('Probability of besting Secretariat:', prob)\n\n#EXPONENTIAL MODELLING\ndef successive_poisson(tau1, tau2, size=1):\n    \"\"\"Compute time for arrival of 2 successive Poisson processes.\"\"\"\n    # Draw samples out of first exponential distribution: t1\n    t1 = np.random.exponential(scale=tau1, size=size)\n\n    # Draw samples out of second exponential distribution: t2\n    t2 = np.random.exponential(scale=tau2, size=size)\n\n    return t1 + t2\n\n#PART TWO\n# Draw samples of waiting times: waiting_times\nwaiting_times = successive_poisson(764, 715, 100000)\n\n# Make the histogram\nplt.hist(waiting_times, normed=True, bins=100, histtype='step')\n\n\n# Label axes\nplt.xlabel('Total waiting time (games)')\nplt.ylabel('PDF')\n\n\n# Show the plot\n\nplt.show()","sub_path":"StatisticalThinking1.py","file_name":"StatisticalThinking1.py","file_ext":"py","file_size_in_byte":8946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
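# Editor's note: the StatisticalThinking1.py record above ends with
# successive_poisson(); the self-contained sketch below (an editorial addition,
# not a dataset record) sanity-checks that the mean of the summed exponential
# waits lands near tau1 + tau2. The tau values just mirror the ones used above.
import numpy as np

def successive_poisson(tau1, tau2, size=1):
    """Total waiting time for two successive Poisson processes."""
    t1 = np.random.exponential(scale=tau1, size=size)
    t2 = np.random.exponential(scale=tau2, size=size)
    return t1 + t2

waits = successive_poisson(764, 715, size=100000)
# Expected mean of the sum is tau1 + tau2 = 1479; allow a few standard errors.
assert abs(np.mean(waits) - 1479) < 25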
+{"seq_id":"273541815","text":"\"\"\"\nUtilities related to the functionality of MATLAB's buffer function.\n\"\"\"\nfrom utilities.typingutils import is_int\n\n\ndef get_noof_windows(n: int, wsize: int, wstep: int) -> int:\n assert isinstance(n, int)\n assert isinstance(wsize, int)\n assert isinstance(wstep, int)\n\n if n < wsize:\n return 0\n\n return int((n - wsize) / wstep) + 1\n\n\ndef get_window_timestamp(wsize_sec: float, wstep_sec: float, i: int) -> [float, float]:\n assert isinstance(wsize_sec, float)\n assert isinstance(wstep_sec, float)\n assert is_int(i)\n\n t_start: float = wstep_sec * i\n t_stop: float = t_start + wsize_sec\n\n return t_start, t_stop\n","sub_path":"src/utilities/bufferutils.py","file_name":"bufferutils.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"75319034","text":"import os\npath ='C:/Users/Анастасия/Desktop/ITMO/original_acs_records/data/'\nos.chdir(path)\n\n\nfiles = os.listdir(\".\")\n\n##xml = filter(lambda x: x.endswith('.xml'), files)\n##print (files)\n\n# My filter\nfiles_xml = []\n\nfor each in files:\n if each[len(each)-3:] == 'xml':\n files_xml.append(each)\n\n\nfor name in files_xml:\n## print (' '+name)\n f = open(name,'r')\n key = ''\n\n ## Search name of case\n\n new_name = ''\n data = f.readline()\n while data != '':\n locus = data.find(key)\n if locus != -1:\n if data.find('С2014') != -1:\n## print(data)\n case = ''\n for char in data[locus+len(key):]:\n if char != '<':\n case += char\n else:\n case += ' '\n break\n new_name += case\n data = f.readline()\n new_name = new_name[:len(new_name)-1]+'.xml'\n new_name = new_name.replace('/','_')\n## print (new_name)\n f.close()\n try:\n os.rename(name, 'C:\\\\Users\\\\Анастасия\\\\Desktop\\\\ITMO\\\\original_acs_records\\\\data\\\\Changed\\\\'+new_name)\n except FileExistsError:\n print ('COPY')\n os.remove(name)\n","sub_path":"original_acs_records/data/new name case.py","file_name":"new name case.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"506770703","text":"import numpy as np\nfrom numpy import empty\nimport math\nimport matplotlib.pyplot as plt\n\nimport random\nimport array as arr\nimport math\nfrom mpl_toolkits.mplot3d import Axes3D\nimport sys\n# from numba import jit\n\n\n\n## Set deep recursion limit\nsys.setrecursionlimit(50000)\n\n###########################################################################################################################\n\n## Experimental parameters\n\ngap = 0.5 #electrode gap in cm\nV_applied1 = 1000 #voltage applied in Volts\nV_applied2 = 2000 #voltage applied in Volts\nV_applied3 = 3000 #voltage applied in Volts\n\nE_field1 = V_applied1/gap #Electric field in V/cm\nE_field2 = V_applied2/gap #Electric field in V/cm\nE_field3 = V_applied3/gap #Electric field in V/cm\n\ngain = 2000000 #electrons per UV in PMT\ne_mob = 2.55797813e6*(750.062/142.85714286) #electron mobilty in gas from Austin's Calculations\npressure = 760 #pressure in Torr\n\n\n## Velocities show are caluclated by Austin McDonald in cm/s\ndrift_v1 = 416166.51896023441805 #e_mob*(E_field1/pressure) #drift velocity in cm/s [/mu*(E/p)]\ndrift_v2 = 776180.47799293056596 #e_mob*(E_field2/pressure) #drift velocity in cm/s [/mu*(E/p)]\ndrift_v3 = 1191274.1908784906846 #e_mob*(E_field3/pressure) #drift velocity in cm/s [/mu*(E/p)]\n\n\nNRG_UV = (3e8/128e-9)*6.626e-34 #energy of 128 nm UV photon 
in J\n\ntau1 = 11e-9 #time constant for singlet state in s\nk1 = 1/tau1\ntau2 = 3.2e-6 #time constant for triplet state in s\nk2 = 1/tau2 \n\n##########################################################################################################################\n\n############################################################################################################################ \n\n## Create representation for single ideal pulse\n\ntime = np.linspace(0,20e-6,num=40000)\ntime_pulse = np.linspace(0,50e-6, 500000)\n# Volts = np.zeros(300000, dtype = object) #Signal voltage\nIdeal_V = np.zeros(500000, dtype = object) #Ideal signal voltage\n\n\n###### Idealized pulse signal w/ V_max= 10e-3 V\n\nfor Q in range(0,len(time_pulse)-1):\n if time_pulse[Q] < 5.5e-10:\n Ideal_V[Q] = 0\n Ideal_V[Q] = -1*Ideal_V[Q]\n# if time[Q] == 5.5e-10:\n# Ideal_V[Q] = -10e-3\n# Ideal_V[Q] = -1*Ideal_V[Q]\n if time_pulse[Q] >= 5.5e-10:\n Ideal_V[Q] = 5e-3*(k1*math.exp(-k1*(time_pulse[Q]-5.5e-10)) + (1/math.sqrt(3))*k2*math.exp(-k2*(time_pulse[Q]-5.5e-10)))\n# Ideal_V[Q] = -1*Ideal_V[Q]\n\n\n###########################################################################################################################\n###########################################################################################################################\n\n#### Functions for the code to use\n\n###########################################################################################################################\n\n## Function to create new initial positions for created electrons\n## Set for creation of 3 every time\n\ndef elec_gen(Q):\n \n R = np.linspace(0, 4.75, 360) #create representation of electrodes\n u = np.linspace(0, 2*np.pi, 360)\n x = np.outer(R, np.cos(u))\n y = np.outer(R, np.sin(u)) \n h1 = np.outer(1.5*np.ones(np.size(u)), np.ones(np.size(u)))\n h2 = np.outer(2.0*np.ones(np.size(u)), np.ones(np.size(u)))\n\n elec = np.zeros([3,3]) #3 electrons on cathode\n random_ang = np.random.uniform(0,2*np.pi,size = 3)\n\n for Q in range(0,3): #initialize electron distribution on cathode\n for R in range(0, len(elec[Q])):\n if Q == 0: \n elec[Q][R] = random.uniform(-4.75*np.cos(random_ang[R]),4.75*np.cos(random_ang[R])) #initial x\n if Q == 1:\n elec[Q][R] = random.uniform(-4.75*np.sin(random_ang[R]),4.75*np.sin(random_ang[R])) #initial y\n if Q == 2:\n elec[Q][R] = 1.5\n elec_drift[R][0] = 1.5 #initial z\n \n return elec_drift[Q][0]\n############################################################################################################################\n\n## Feedback loop function for generation of secondary electrons/UV\n## Uses random numbers to determine the creation or not of UV \n## Currently set for arbitrary probabilities\n\ndef feedback(start_t):\n \n UV_prod_total = 0\n UV_PMT_total = 0\n UV_cath_total = 0\n\n UV_PMT_time3 = arr.array('d')\n UV_PMT_time4 = arr.array('d')\n UV_PMT_time5 = arr.array('d')\n \n print(\"Entering time loop\")\n \n \n UV_cath_time3 = arr.array('d')\n\n end_t = start_t + 20e-6\n \n# print(end_t)\n \n time2 = np.linspace(start_t,end_t,num=40000)\n \n elec_drift2 = np.zeros([3,40000])\n \n for Q in range(0,3):\n elec_drift2[Q][0] = 1.5\n# print(elec_drift2[Q][0])\n \n for Q in range(0,len(time2)-1):\n \n for T in range(0,3):\n elec_drift2[T][Q] = elec_drift2[T][0]+drift_v1*time2[Q]\n \n \n if elec_drift2[T][Q] <= 2.0 and time2[Q] <= 20e-6: \n UV_rand = np.random.randint(1,5,size = 3) #prob of photon produced (20e-6s/40e3 intervals)*1000 photons/cm*416166cm/s = 0.2 (1/5) 
photons/interval\n UV_prod = 0\n\n for R in range(0,len(UV_rand)):\n if UV_rand[R] == 1: # ~0.25 (1/4) chance of UV production\n UV_prod += 1\n UV_prod_total += 1\n\n UV_rand_2 = np.random.randint(1,301,size = UV_prod)\n UV_cath = 0\n UV_PMT = 0\n\n for S in range(0,len(UV_rand_2)):\n\n if UV_rand_2[S] > 150: # ~0.5 (1/2) prob that UV is detected\n# print(\"UV detected\")\n UV_PMT_time3.append(time2[Q]) #time that UV is detected\n# print(time[Q])\n UV_PMT += 1\n UV_PMT_total += 1\n \n \n if UV_rand_2[S] == 1: # ~0.0033 (1/300) prob that UV goes to cathode\n print(\"UV hits cathode - Secondary\") \n UV_cath_time.append(time2[Q]) #time that UV is sent to cathode\n UV_cath += 1\n UV_cath_total += 1\n\n UV_PMT_time4 = feedback(time2[Q]) # More feedback loops\n UV_PMT_time5 = UV_PMT_time3 + UV_PMT_time4 # Concatenate feedack loops\n \n print(\"Exit time loop\")\n \n return(UV_PMT_time5) \n\n######################################################################################################################################\n\n## Define function to match indices of detected time and pulse time\ndef matchindx(time_det): \t\t\t\t\n \n indx = 0\n \n for Q in range(0,len(time)-1):\n \n if time_pulse[Q] <= time_det:\n indx = Q\n \n return(indx)\n\n######################################################################################################################################\n\n## Define function to calculate mplitude based on # of photons detected\ndef CalcAmplitude(time,time2,UV_cnt):\n \n if time == 0:\n \t return 0\n \t \n else:\n \tNRG_UV = (3e8/128e-9)*6.626e-34 #energy of 128 nm UV photon in J\n \tIavg = 0.1e-3 #Avg current in PMT\n \tgain = 2000000\n \tAmpl = (gain*UV_cnt*NRG_UV)/((time2-time)*Iavg) \n \n \treturn Ampl\n\n#####################################################################################################################################\n##############################################################################################################################\n\n#### Here's where the fun begins\n\n## Create representation of electrodes within Teapot\n## Not necessary but helps to visualize initial position of electrons\n\nR = np.linspace(0, 4.75, 360) \nu = np.linspace(0, 2*np.pi, 360)\nx = np.outer(R, np.cos(u))\ny = np.outer(R, np.sin(u)) \nh1 = np.outer(1.5*np.ones(np.size(u)), np.ones(np.size(u)))\nh2 = np.outer(2.0*np.ones(np.size(u)), np.ones(np.size(u)))\n\n\n# Create 3 electrons on anode\nelec = np.zeros([3,3])\n\n\n# Initialize z drift position\n## ex: np.zeros([num of e, # of intervals]) --> time intervals related to probability of UV creation\n## --> prob of photon produced (20e-6s/40e3 intervals) * 1000 photons/cm * 416166cm/s = 0.2 photons/interval\n\n# elec_drift = np.zeros([360,2000]) \nelec_drift1 = np.zeros([3,40000]) \nelec_drift2 = np.zeros([3,40000]) \nelec_drift3 = np.zeros([3,40000]) \n\n\n## Create Random initial position for electrons\n\nrandom_ang = np.random.uniform(0,2*np.pi,size = 3)\n\nfor Q in range(0,3): \n for R in range(0, len(elec[Q])):\n if Q == 0: \n elec[Q][R] = random.uniform(-4.75*np.cos(random_ang[R]),4.75*np.cos(random_ang[R])) #initial x\n if Q == 1:\n elec[Q][R] = random.uniform(-4.75*np.sin(random_ang[R]),4.75*np.sin(random_ang[R])) #initial y\n# if Q == 2: ## hardset initial z on cathode \n# elec[Q][R] = 1.5\n# elec_drift[R][0] = 1.5 \n if Q == 2:\n elec[Q][R] = random.uniform(1.5,2.0)\n elec_drift1[R][0] = elec[Q][R] #initial z1\n elec_drift2[R][0] = elec[Q][R] #initial z2\n elec_drift3[R][0] = elec[Q][R] #initial 
z3\n\nUV_prod_total = 0\nUV_PMT_total = 0\nUV_cath_total = 0\n\nUV_PMT_time = arr.array('d')\nUV_PMT_time2 = arr.array('d')\n\nUV_cath_time = arr.array('d')\n\n### Start drift of electrons/ allow probability to induce feedback loop\n## Count UVs by keeping track of when they are observed\n\nfor Q in range(0,len(time)-1):\n \n for T in range(0,3):\n elec_drift1[T][Q] = elec_drift1[T][0]+drift_v1*time[Q]\n \n if elec_drift1[T][Q] <= 2: \n UV_rand = np.random.randint(1,5,size = 3) #prob of photon produced (20e-6s/40e3 intervals)*1000 photons/cm*416166cm/s = 0.2 photons/interval\n UV_prod = 0\n\n for R in range(0,len(UV_rand)):\n if UV_rand[R] == 1: # ~0.25 chance of UV production\n UV_prod += 1\n UV_prod_total += 1\n\n UV_rand_2 = np.random.randint(1,301,size = UV_prod)\n UV_cath = 0\n UV_PMT = 0\n\n for S in range(0,len(UV_rand_2)):\n\n if UV_rand_2[S] > 150: # ~0.5 prob that UV is detected\n # print(\"UV detected\")\n UV_PMT_time.append(time[Q]) #time that UV is detected\n UV_PMT += 1\n UV_PMT_total += 1 \n\n if UV_rand_2[S] == 1 : # ~0.01 prob that UV goes to cathode\n print(\"UV hits cathode - primary\") \n UV_cath_time.append(time[Q]) #time that UV is sent to cathode\n UV_cath += 1\n UV_cath_total += 1 \n\n UV_PMT_time2 = feedback(time[Q])\n UV_PMT_time = UV_PMT_time + UV_PMT_time2 \n \n############################################################################################################################\n\n## Convert UV counts into pulses\n\npulse = np.zeros(500000, dtype = float)\n\n \nUV_counts, UV_bins, UV_bars = plt.hist(UV_PMT_time, bins = 300, histtype = 'bar')\nplt.title('UV detection : 1:300 prob of drift-induced feedback')\nplt.xlabel('Time (s)',fontsize=12)\nplt.ylabel('Counts',fontsize=12)\nplt.savefig('/Users/neutrino/Ryne/rdingler/ELsim_results/feedback_loop_1in300_UVhist_1000V_test_new110719_gain2M_instW.pdf') \n\n\nUV_tot = 0 \n\nfor R in range(0,len(UV_counts)-1):\n\n UV_cnt = UV_counts[R]\n# UV_tot += UV_cnt\n \n if UV_cnt != 0:\n print(\"%i UV photons detected at time %.10E\" %(UV_cnt,UV_bins[R]))\n time_det = UV_bins[R]\n indx = matchindx(time_det)\n# print(indx)\n\n for Q in range(indx,len(time_pulse)-2):\n \tAmpl = 1 \t\t\t\t\t#initialize amplitude\n \tAmpl = CalcAmplitude(time_pulse[Q],time_pulse[Q+1],UV_cnt)\n \tpulse[Q] = pulse[Q] + Ampl* (k1*math.exp(-k1*(time_pulse[Q]-time_det)) + 5*k2*math.exp(-k2*(time_pulse[Q]-time_det)))\n# print(pulse[Q])\n\n# for Q in range(0,len(pulse)-1):\n# pulse[Q] = -1*pulse[Q]\n\nprint(\"DONE!\")\n\n########################################################################################################################\n## Normalization parameters\n\nsum_Ar_0000V = np.sum(Ideal_V)\nsum_Ar = np.sum(pulse)\n\n## rate values collected manually \n\nrate_Ar_0000V = 51231/120 #18046/120 49392/120 \nrate_Ar_1000V = 130680/120 #49585/120 18046/120\nrate_Ar_2000V = 270241/120 #18046/120 49392/120 \nrate_Ar_3000V = 151443/120 #49585/120 18046/120\n\n##########################################################################################################################\n\n## Make some plots\n\nf, ax = plt.subplots(1)\n\n# ax.plot(time_pulse, pulse)\n# ax.plot(time_pulse, Ideal_V)\n\n# ax.plot(time_pulse, pulse/sum_Ar_0000V*rate_Ar_0000V)\nax.plot(time_pulse, pulse*(rate_Ar_1000V/sum_Ar))\n# ax.plot(time_pulse, pulse/sum_Ar*rate_Ar_2000 V)\n# ax.plot(time_pulse, pulse/sum_Ar*rate_Ar_3000V)\n\nax.plot(time_pulse, Ideal_V*(rate_Ar_0000V/sum_Ar_0000V))\n\nax.legend(('Pulse with feedback, 1000V','Pulse w/o feedback'), loc = 'upper right', 
prop={'size':8})\n\n# ax.legend(('Pulse with feedback, 1000V','Pulse with feedback, 2000V','Pulse with feedback, 3000V','Pulse w/o feedback'), loc = 'upper right', prop={'size':8})\n\nplt.suptitle('Feedback Pulses : 1:300 prob of drift-induced feedback')\n# plt.title('Feedback Pulses : 1:300 prob of drift-induced feedback')\nplt.title('Normalized by pulse rate')\n# plt.title('Unnormalized')\n\n\nplt.xlabel('Time (s)',fontsize=12)\nplt.ticklabel_format(axis='x', style='sci', scilimits=(-3,3))\n# plt.ylabel('Voltage (V) - logscale',fontsize=12)\nplt.ylabel('Arbitrary Units',fontsize=12)\n\nplt.yscale('log')\n\n\nplt.savefig('/Users/neutrino/Ryne/rdingler/ELsim_results/integrated_feedback_loop_1in300_Normalized1000V_adjustedvelocity_test_new110719_gain2M_instW.pdf') \n# plt.savefig('/Users/neutrino/Ryne/rdingler/ELsim_results/integrated_feedback_loop_1in300_unormalized.pdf') \n","sub_path":"feedback_adjV_Nov2019.py","file_name":"feedback_adjV_Nov2019.py","file_ext":"py","file_size_in_byte":15146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"257854047","text":"import os\nimport pytest\n\nfrom ._common import TmuxSession\nfrom ._common import container_runtime_or_fail\n\nEXECUTION_MODES = [\"interactive\", \"stdout\"]\n\n\n@pytest.fixture(scope=\"session\")\ndef test_fixtures_dir():\n return os.path.join(os.path.dirname(__file__), \"..\", \"fixtures\")\n\n\n@pytest.fixture(scope=\"session\", name=\"container_runtime_or_fail\")\ndef fixture_container_runtime_or_fail():\n \"\"\"check if container runtime is available\"\"\"\n yield container_runtime_or_fail\n\n\n@pytest.fixture\ndef patch_curses(monkeypatch):\n \"\"\"patch curses so it doesn't Traceback during tests\"\"\"\n # pylint: disable=import-outside-toplevel\n import curses\n\n monkeypatch.setattr(curses, \"cbreak\", lambda: None)\n monkeypatch.setattr(curses, \"nocbreak\", lambda: None)\n monkeypatch.setattr(curses, \"endwin\", lambda: None)\n","sub_path":"tests/integration/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"458371980","text":"def qiuhe(a):\n\tc = 0\n\tif a > 0:\n\t\tfor i in range(1,a+1):\n\t\t\tc+=i\n\t\tprint(\"1到%d的和为%d\"%(a,c))\n\telse:\n\t\tprint(\"错误\")\nsum = int(input(\"请输入值:\"))\nqiuhe(sum)\n\t\n\t\n","sub_path":"15day/canshu.py","file_name":"canshu.py","file_ext":"py","file_size_in_byte":174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"303554782","text":"\"\"\"\nImplement a Stack\n\n~ PROBLEM ~\n\nA very common interview question is to begin by just implementing a Stack! 
Try your best to implement your own stack!\n\nIt should have the methods:\n\n- Check if its empty\n- Push a new item\n- Pop an item\n- Peek at the top item\n- Return the size\n\n~ SOLUTION ~\n\n- N/A\n\n~ TAKEAWAY(S) ~\n\n- N/A\n\"\"\"\n\nclass Stack(object):\n \n def __init__(self):\n self.items = []\n \n def push(self, item):\n self.items.append(item)\n \n def pop(self):\n try:\n return self.items.pop()\n except:\n print('Stack is empty')\n \n def peek(self):\n print(self.items[len(self.items) - 1])\n \n def size(self):\n return len(self.items)\n \n def is_empty(self):\n return len(self.items) == 0\n\ndef main():\n s = Stack()\n\n print(s.is_empty())\n s.push('one')\n print(s.is_empty())\n print(s.pop())\n s.push('one')\n s.push('two')\n s.push('three')\n s.push('four')\n s.push('five')\n print(s.size())\n print(s.pop())\n print(s.pop())\n print(s.pop())\n print(s.pop())\n print(s.pop())\n print(s.pop())\n\nif __name__ == '__main__':\n main()","sub_path":"py-dsa/stacks_queues_deques/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"361959867","text":"\r\nimport copy, re, json, random, time\r\nfrom core import argv, dbop, files, urlpy\r\nfrom urllib import parse, request as ureq\r\nfrom pyquery import PyQuery as pyq\r\n\r\ndef fixSale(db, act):\r\n\r\n cmin = int(cfg('pagemin'))\r\n cmax = int(cfg('pagemax'))\r\n data = {}\r\n \r\n for ipg in range(cmin, cmax):\r\n page = str(ipg);\r\n dmkey = 'div.houselist'\r\n url = liurl(page)\r\n itms = ritms(url, dmkey)\r\n html = ritms(url, 0)\r\n for i in itms:\r\n info = pyq(i).find('.info')\r\n if not info:\r\n continue\r\n h3 = pyq(info).find('h3')\r\n url = pyq(h3).find('a').attr('href')\r\n fid = url.split('/')[6]; # fid = url.split('=')[1];\r\n tmp = pyq(h3).find('i').attr('class');\r\n ccid18 = tmp.replace('btn btn-xs dais-','');\r\n print(fid); print(ccid18);\r\n db.exe(\"UPDATE {url} SET tag1='\"+ccid18+\"' WHERE fid='\"+fid+\"'\")\r\n \r\n data['res'] = 'res'\r\n return data\r\n\r\ndef img(db, act):\r\n cbat = cfg('delimit')\r\n offset = random.randint(5, 15)\r\n limit = \" LIMIT \"+str(offset)+\",\"+cbat+\" \"\r\n data = {'_end':'-', '_fids':''}\r\n res = {}\r\n\r\n if act=='view':\r\n res = db.get(\"SELECT * FROM {img} ORDER BY id \"+limit+\"\")\r\n elif act=='test':\r\n itms = db.get(\"SELECT * FROM {url} ORDER BY id \"+limit+\"\")\r\n for row in itms:\r\n fid = row['fid']\r\n res[fid] = imgp(db, act, row)\r\n data['_fids'] += fid + ','\r\n elif act=='done':\r\n itms = db.get(\"SELECT * FROM {url} WHERE f2=0 ORDER BY id LIMIT \"+cbat)\r\n if not itms:\r\n data['_end'] = 1\r\n for row in itms:\r\n fid = row['fid']\r\n res[fid] = imgp(db, act, row)\r\n data['_fids'] += fid + ','\r\n else: # save\r\n itms = db.get(\"SELECT * FROM {img} WHERE f1=0 ORDER BY id LIMIT \"+cbat)\r\n if not itms:\r\n data['_end'] = 1\r\n for row in itms:\r\n fid = row['fid']\r\n res[fid] = imgs(db, act, row)\r\n #print('1')\r\n #time.sleep(3)\r\n #print('2')\r\n data['_fids'] += fid + ','\r\n data['res'] = res\r\n return data\r\n\r\ndef imgs(db, act, row):\r\n url = row['thumb']\r\n file = urlpy.svurl(url, 'pics')\r\n # 放前面???\r\n db.exe(\"UPDATE {img} SET f1=1 WHERE fid='\"+str(row['fid'])+\"'\")\r\n return file\r\n\r\ndef imgp(db, act, row):\r\n\r\n baseurl = argv.cfgs['cjcfg']['baseurl']\r\n ubase = baseurl + 
'/index.php?/ajax/pageload/aj_model/a%2C{chid}/aj_check/1/aj_pagenum/1/aj_pagesize/180/aj_nodemode/0/aj_thumb/thumb%2C184%2C134/aj_whrfields/pid3%2C%3D%2C{aid}%3Bshi%2C%3D%2C0/callback/'\r\n data = {}\r\n pcaids = {'7':'相册图', '11':'户型图'}\r\n data['pcaids'] = pcaids\r\n\r\n for pcaid in pcaids:\r\n #if pcaid=='900': # &count=false&city=东莞\r\n url = ubase.replace('{chid}',pcaid).replace('{aid}',row['fid'])\r\n html = ritms(url, 0)\r\n itms = json.loads(html)\r\n pdic = {}; no = 0\r\n for itm in itms:\r\n if type(itm)==str:\r\n print('Error:'+itm)\r\n continue\r\n pid = itm['aid']\r\n pdic[pid] = {'fid':pid, 'pcaid':pcaid}\r\n pdic[pid]['title'] = itm['catalog']\r\n pdic[pid]['thumb'] = itm['thumbOrg']\r\n if itm['caid']=='11':\r\n tags = {}\r\n tags['ccid12'] = itm['ccid12']\r\n tags['shi'] = itm['shi']\r\n tags['ting'] = itm['ting']\r\n tags['wei'] = itm['wei']\r\n tags['chu'] = itm['chu']\r\n pdic[pid]['tags'] = tags\r\n pdic[pid]['price'] = itm['dj']\r\n pdic[pid]['area'] = itm['mj']\r\n else:\r\n pdic[pid]['tags'] = {}\r\n pdic[pid]['price'] = '0'\r\n pdic[pid]['area'] = '0'\r\n # save\r\n sql = \"SELECT * FROM {img} WHERE fid=%s\"\r\n urow = db.get(sql, (pid,),1) #str.strip([chars])\r\n flval = (pid, row['fid'], pdic[pid]['title'], pcaid, pdic[pid]['price'], \r\n pdic[pid]['area'], pdic[pid]['thumb'], jdump(pdic[pid]['tags']))\r\n if not urow:\r\n flids = 'fid,pid,title,pcaid,price,area,thumb,tags'\r\n sql = \"INSERT INTO {img} (\"+flids+\") VALUES (%s,%s,%s,%s,%s,%s,%s,%s) \"\r\n pdic[pid]['_res'] = 'add';\r\n else:\r\n flids = \" fid=%s,pid=%s,title=%s,pcaid=%s,price=%s,area=%s,thumb=%s,tags=%s \"\r\n whr = \" WHERE fid='\"+str(urow['fid'])+\"' \"\r\n sql = \"UPDATE {img} SET\" + flids + whr\r\n pdic[pid]['_res'] = 'upd';\r\n #data['_res'] = 'add' if not urow else 'upd'\r\n if act=='done':\r\n db.exe(sql, flval)\r\n # for-return\r\n data['p'+pcaid] = pdic\r\n if act=='done':\r\n db.exe(\"UPDATE {url} SET f2=1 WHERE id='\"+str(row['id'])+\"'\")\r\n return data\r\n\r\ndef data(db, act):\r\n\r\n cbat = cfg('delimit')\r\n offset = random.randint(5, 15)\r\n limit = \" LIMIT \"+str(offset)+\",\"+cbat+\" \"\r\n data = {'_end':'-', '_fids':''}\r\n res = {}\r\n\r\n if act=='view':\r\n res = db.get(\"SELECT * FROM {data} ORDER BY id \"+limit+\"\")\r\n elif act=='done':\r\n itms = db.get(\"SELECT * FROM {url} WHERE f1=0 ORDER BY id LIMIT \"+cbat)\r\n if not itms:\r\n data['_end'] = 1\r\n for row in itms:\r\n fid = row['fid']\r\n res[fid] = datap(db, act, row)\r\n data['_fids'] += fid + ','\r\n else: # test\r\n itms = db.get(\"SELECT * FROM {url} ORDER BY id \"+limit+\"\")\r\n for row in itms:\r\n fid = row['fid']\r\n res[fid] = datap(db, act, row)\r\n data['_fids'] += fid + ','\r\n data['res'] = res\r\n return data\r\n\r\ndef datap(db, act, row):\r\n\r\n baseurl = argv.cfgs['cjcfg']['baseurl']\r\n data = {}\r\n fid = row['fid']\r\n rid = row['id']\r\n url = baseurl + '/archive.php?aid='+fid+'&addno=12'\r\n html = ritms(url, 0)\r\n\r\n data['detail'] = pyq(html).find('.xmjj').find('.lp-tabcon').html().replace(baseurl,'')\r\n data['equip'] = pyq(html).find('.xmpt').find('.lp-tabcon').html().replace(baseurl,'').replace('href','x')\r\n binfos = pyq(html).find('.lp-detail')\r\n\r\n dts = pyq(binfos).find('dt')\r\n dds = pyq(binfos).find('dd')\r\n base = {}\r\n no = 0\r\n for i in dts:\r\n key = pyq(i).text()\r\n val = pyq(binfos).find('dd').eq(no).text()\r\n key = key.replace(' ','').replace(' ','').replace(':','').replace(' ','');\r\n val = val.replace(' ','').replace(' 
','').replace(':','').replace(' ','');\r\n if key: # && val\r\n base[key] = val\r\n no = no + 1\r\n data['base'] = base\r\n '''\r\n '''\r\n data['sale'] = ''\r\n data['xiaoqu'] = ''\r\n data['temp'] = ''\r\n # save\r\n sql = \"SELECT * FROM {data} WHERE id=%s\"\r\n urow = db.get(sql, (rid,),1) #str.strip([chars])\r\n flids = 'id,detail,equip,info_base,info_sale,info_xiaoqu,info_temp'\r\n flval = (rid, data['detail'].strip(), data['equip'].strip(), jdump(data['base']), \r\n jdump(data['sale']), jdump(data['xiaoqu']), jdump(data['temp']))\r\n sql = \"REPLACE INTO {data} (\"+flids+\") VALUES (%s,%s,%s,%s,%s,%s,%s) \"\r\n data['_res'] = 'add' if not urow else 'upd'\r\n if act=='done':\r\n db.exe(sql, flval)\r\n db.exe(\"UPDATE {url} SET f1=1 WHERE id='\"+str(rid)+\"'\")\r\n return data\r\n\r\ndef url(db, act):\r\n\r\n cmin = int(cfg('pagemin'))\r\n cmax = int(cfg('pagemax'))\r\n cbat = int(cfg('delimit'))\r\n #proc = int(cfg('proc'))\r\n data = {}\r\n data = {'_end':'-', '_pages':''}\r\n act = argv.get('act', 'view')\r\n if act=='done':\r\n page = int(argv.get('page', '1'))\r\n start = max(cmin, page)\r\n end = start + cbat\r\n if end>cmax+1:\r\n end = cmax+1\r\n for i in range(start, end):\r\n res = urlp(db, act, i)\r\n data['_pages'] += str(i) + ','\r\n data['_pend'] = i+1\r\n if i>=cmax:\r\n data['_end'] = 1\r\n else:\r\n page = random.randint(cmin, cmax)\r\n res = urlp(db, act, page)\r\n data['_pages'] = page\r\n # \r\n data['res'] = res\r\n return data\r\n\r\ndef urlp(db, act, page):\r\n\r\n page = str(page);\r\n data = {}\r\n if act=='view':\r\n return db.get(\"SELECT * FROM {url} ORDER BY id LIMIT \"+page+\",5\")\r\n\r\n dmkey = 'div.houselist'\r\n url = liurl(page)\r\n itms = ritms(url, dmkey)\r\n html = ritms(url, 0)\r\n\r\n no = 0\r\n for i in itms:\r\n itm = {}\r\n info = pyq(i).find('.info')\r\n if not info:\r\n continue\r\n tmp = pyq(info).find('h3').find('a')\r\n title = pyq(tmp).text()\r\n itm['url'] = pyq(tmp).attr('href')\r\n fid = itm['url'].split('/')[6];\r\n itm['thumb'] = pyq(i).find('img[width]').attr('data-original').replace('_160_120','')\r\n itm['tags'] = pyq(i).find('.tags').html().replace('',',').replace('','').replace('','')\r\n itm['price'] = pyq(i).find('.dj').text()\r\n itm['address'] = pyq(i).find('p').eq(0).text().replace('查看地图','').replace(' ','').replace('','')\r\n mapurl = pyq(i).find('p').eq(0).find('.icon3').attr('href')\r\n if mapurl:\r\n itm['local'] = mapurl.split('#')[1].split('&zoom')[0].replace('lat=','').replace('&lng=',',');\r\n itm['tmp'] = '';\r\n #print(fid); lat=23.7609534251&lng=114.671497117\r\n no += 1\r\n # save\r\n sql = \"SELECT * FROM {url} WHERE fid=%s\"\r\n row = db.get(sql, (fid,),1)\r\n flval = (itm['url'],fid,title,itm['tags'],itm['price'],itm['address'],itm['thumb'],itm['local'])\r\n #print(row['id'])\r\n if not row:\r\n flids = 'url,fid,title,tags,price,address,thumb,local'\r\n sql = \"INSERT INTO {url} (\"+flids+\") VALUES (%s,%s,%s,%s,%s,%s,%s,%s) \"\r\n itm['_res'] = 'add';\r\n else:\r\n flids = \" url=%s,fid=%s,title=%s,tags=%s,price=%s,address=%s,thumb=%s,local=%s \"\r\n whr = \" WHERE id='\"+str(row['id'])+\"' \"\r\n sql = \"UPDATE {url} SET\" + flids + whr\r\n itm['_res'] = 'upd';\r\n if act=='done':\r\n db.exe(sql, flval)\r\n data[fid+':'+title] = itm\r\n \r\n return data\r\n\r\ndef area(db, act):\r\n\r\n if act=='view':\r\n return db.get(\"SELECT * FROM {attr} ORDER BY id\")\r\n\r\n dic = [{'type':'area', 'dmkey':'.list-1 a'}, # 区域\r\n {'type':'price', 'dmkey':'.list-17 a'}] # 价格区间\r\n \r\n res = {}\r\n for dk in 
dic:\r\n\r\n itms = ritms(liurl(), dk['dmkey'])\r\n \r\n for i in itms: # baseurl\r\n fid = pyq(i).attr('href').replace('/html/newhouse/','').replace('/1.html','')\r\n fid = fid.replace(argv.cfgs['cjcfg']['baseurl'],'')\r\n title = pyq(i).text()\r\n sql = \"SELECT * FROM {attr} WHERE title=%s AND type=%s\"\r\n row = db.get(sql, (title,dk['type']),1)\r\n if not row:\r\n if act=='done':\r\n sql = \"INSERT INTO {attr} (title,fid,type) VALUES (%s,%s,%s) \"\r\n db.exe(sql, (title,fid,dk['type']))\r\n res[dk['type']+':'+title] = 'add';\r\n else:\r\n res[dk['type']+':'+title] = 'skip';\r\n\r\n return res\r\n #\r\n\r\n\r\ndef ritms(url, dkey):\r\n #url = 'http://newhouse.jx.fang.com/house/s/'\r\n fp = argv.cfgs['dir']['cache'] + '/pages/' + files.autnm(url, 1)\r\n ok = files.tmok(fp, 720)\r\n if ok:\r\n html = files.get(fp, 'utf-8')\r\n else:\r\n html = urlpy.page(url)\r\n files.put(fp, html)\r\n if not dkey:\r\n return html\r\n doc = pyq(html)\r\n return doc(dkey)\r\n #\r\n\r\ndef jdump(dic):\r\n return json.dumps(dic, ensure_ascii=False)\r\n\r\ndef cfg(key=None):\r\n cjcfg = argv.cfgs['cjcfg']; \r\n return cjcfg[key] if key in cjcfg.keys() else None\r\n\r\ndef liurl(page=1):\r\n site = cfg('site')\r\n url = 'http://www.076299.cn/index.php?caid=2&addno=1&page={page}'\r\n return url.replace('{site}',site).replace('{page}',str(page))\r\n","sub_path":"app/libs/cj08cms-hy.py","file_name":"cj08cms-hy.py","file_ext":"py","file_size_in_byte":12081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"279402353","text":"'''\nCreated on 2019. 4. 29.\n\n@author: 702-2-04\n'''\nimport pymysql\n\nconfig = {\n 'host' : '127.0.0.1',\n 'user' : 'scott',\n 'password' : 'tiger',\n 'database' : 'work',\n 'port' : 3306,\n 'charset':'utf8',\n 'use_unicode' : True}\n\ntry :\n conn = pymysql.connect(**config)\n cursor = conn.cursor()\n \n # inner join\n pay = int(input('join 급여 입력 : '))\n sql =f\"\"\"select e.eno, e.ename, e.pay, d.dname, daddr\n from emp e inner join dept d\n on e.dname = d.dname and e.pay >= {pay}\"\"\"\n \n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data :\n print(row[0], row[1], row[2], row[3], row[4])\n\n print('검색된 레코드 수 :', len(data))\n \n # outer join\n dname = input('join 부서명 입력 : ')\n sql =f\"\"\"select e.eno, e.ename, e.pay, d.dname\n from emp e right outer join dept d\n on e.dname = d.dname and d.dname = '{dname}'\"\"\"\n \n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data :\n # 3개 레코드 출력 \n #print(row[0], row[1], row[2], row[3])\n \n # 정상 레코드 출력 \n if row[0] and row[1] and row[2] and row[3] :\n print(row[0], row[1], row[2], row[3]) \n\n print('검색된 레코드 수 :', len(data))\n \n # subquery1 : 부서번호(dept) -> 사원정보(emp) 출력\n dno = int(input('부서번호 입력 : ')) \n sql=f\"\"\"select eno, ename, hiredate, dname from emp \n where dname = (select dname from dept where dno = {dno}) \"\"\"\n \n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data :\n print(row[0], row[1], row[2], row[3])\n \n print('검색된 레코드 수 :', len(data))\n \n # subquery2 : 사원이름(emp) -> 부서정보(dept) 출력 \n name = input('사원 이름 입력 : ')\n sql =f\"\"\"select * from dept where dname = \n (select dname from emp where ename = '{name}')\"\"\"\n \n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data :\n print(row[0], row[1], row[2])\n \n print('검색된 레코드 수 :', len(data))\n \n \nexcept Exception as e :\n print('db error :', e)\nfinally:\n cursor.close()\n conn.close() 
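# Editor's aside (hypothetical sketch, not part of the original script): the
# f-string queries above splice input() values straight into the SQL text,
# which is open to SQL injection. pymysql accepts %s placeholders, so the same
# inner join can be parameterized; table and column names mirror those above.
def select_emp_by_pay(cursor, min_pay):
    sql = """select e.eno, e.ename, e.pay, d.dname, daddr
             from emp e inner join dept d
             on e.dname = d.dname and e.pay >= %s"""
    cursor.execute(sql, (min_pay,))  # the driver escapes min_pay safely
    return cursor.fetchall()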
\n\n\n\n\n","sub_path":"chapter10/lecture2_MariaDB/step07_table_join2.py","file_name":"step07_table_join2.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"429406878","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 22 15:47:07 2019\r\n\r\n@author: Howie\r\n\"\"\"\r\n\r\n#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 3 16:04:58 2018\r\n\r\n@author: Howard Chung\r\n\"\"\"\r\n\r\n\r\n# Imports the Google Cloud client library\r\nfrom google.cloud import speech\r\nfrom google.cloud.speech import enums\r\nfrom google.cloud.speech import types\r\nfrom google.cloud import storage\r\nimport datetime\r\nimport io\r\nimport os\r\nimport time\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n'''\r\n# 語音基礎串接\r\n一分鐘內語音辨識\r\n'''\r\n\r\nos.chdir('your working directory')\r\n#os.chdir('/home/slave1/git/speech2text_1min')\r\n\r\ndef speech_to_text_in_a_min(title_pattern='nlpno', \r\n wd ='re',\r\n json_os = 'speech2text-3de4444fd46a.json',\r\n sample_rate_hertz = 48000):\r\n '''\r\n * json_os:憑證檔的路徑\r\n * title_pattern:錄音檔的名稱模式\r\n * sample_rate_hertz:錄音的取樣頻率\r\n * doc_title:docx文件名稱\r\n * wd:工作目錄\r\n \r\n '''\r\n \r\n # 計時\r\n# start_time = time.time()\r\n # 從python client端對雲端speech2text服務進行驗證\r\n os.environ['GOOGLE_APPLICATION_CREDENTIALS'] =json_os\r\n client = speech.SpeechClient()\r\n \r\n os.chdir(wd)\r\n file_list = os.listdir() \r\n \r\n # 選出title_pattern的錄音檔\r\n select_wav = []\r\n for i in file_list:\r\n if title_pattern in i:\r\n select_wav.append(i)\r\n \r\n # [START migration_sync_request]\r\n # [START migration_audio_config_file]\r\n \r\n aa = pd.DataFrame()\r\n \r\n for music in select_wav:\r\n \r\n # 將 audio錄音檔 讀入進來\r\n with io.open(music, 'rb') as audio_file:\r\n content = audio_file.read()\r\n \r\n # 將錄音檔轉換成google 看得懂的格式\r\n audio = types.RecognitionAudio(content=content)\r\n \r\n # 設定格式錄音檔\r\n config = types.RecognitionConfig(\r\n encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,\r\n sample_rate_hertz=sample_rate_hertz,\r\n language_code='cmn-Hant-TW' ,\r\n enable_word_time_offsets=True)\r\n \r\n # 機器學習文字辨識(speech2text)\r\n print('')\r\n response = client.recognize(config, audio)\r\n \r\n \r\n transcript_list = []\r\n transcript_confidence = []\r\n timerecored = []\r\n # Each result is for a consecutive portion of the audio. 
Iterate through\r\n # them to get the transcripts for the entire audio file.\r\n for result in response.results:\r\n alternative = result.alternatives[0]\r\n # The first alternative is the most likely one for this portion.\r\n transcript_list.append(alternative.transcript)\r\n transcript_confidence.append(alternative.confidence)\r\n print('Transcript: {}'.format(alternative.transcript))\r\n print('Confidence: {}'.format(alternative.confidence))\r\n \r\n \r\n # begining and end time of a sentence\r\n sentence_start_time = alternative.words[0].start_time\r\n sentence_end_time = alternative.words[len(alternative.words)-1].end_time\r\n \r\n # make time\r\n sentence_start_time = round( sentence_start_time.seconds + sentence_start_time.nanos * 1e-9)\r\n sentence_end_time = round( sentence_end_time.seconds + sentence_end_time.nanos * 1e-9)\r\n \r\n # make min\r\n sentence_start_time= str(datetime.timedelta(seconds=sentence_start_time))\r\n sentence_end_time =str(datetime.timedelta(seconds=sentence_end_time))\r\n timerecored.append([sentence_start_time, sentence_end_time])\r\n \r\n # pandas 建立信心程度資料表\r\n # make df\r\n transcript_df = pd.DataFrame(transcript_list, columns = ['文章段句'])\r\n confidence_df = pd.DataFrame(transcript_confidence, columns = ['機器認字信心水準'])\r\n confidence_df['機器認字信心水準'] = round(confidence_df['機器認字信心水準'],2)\r\n time_df = pd.DataFrame(timerecored, columns = ['start', 'end'])\r\n correctness_summary_df = pd.concat([transcript_df , confidence_df,time_df], axis = 1) \r\n correctness_summary_df = correctness_summary_df.sort_values(['機器認字信心水準'])\r\n correctness_summary_df['改善順序'] = range(1, len(correctness_summary_df)+1)\r\n \r\n timer_translist =[]\r\n for hah,timer in zip(transcript_list,timerecored):\r\n timer_translist.append(hah+' ' +'【'+' to '.join(timer)+'】')\r\n \r\n aa = pd.concat([ aa, correctness_summary_df])\r\n \r\n\r\n return aa.to_csv('文章認字信心矩陣.csv') \r\n\r\n\r\n# main\r\nmatr = speech_to_text_in_a_min(title_pattern='股票_課程', \r\n wd ='stock',\r\n json_os = '憑證檔.json',\r\n sample_rate_hertz = 48000)\r\n\r\n","sub_path":"speech2text_stock/speech2text_audio_stock.py","file_name":"speech2text_audio_stock.py","file_ext":"py","file_size_in_byte":5099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"65542178","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata_file = '/Users/rtsearcy/coding/stanford_classes/cee262D_PO/rho_data.csv'\n\ndf = pd.read_csv(data_file)\ndf.set_index('Depth',inplace=True)\ndf = 1000 + 1000*df\n\n#%% Problem 1\n\ng = 9.81\npres = pd.DataFrame(index = df.columns)\n\nfor i in df.columns:\n p = df[i][0.0]\n for j in df.index[1:]:\n p += df[i][j] * g * (j - df.index[j-1])\n pres.loc[i] = p","sub_path":"stanford_classes/cee262D_PO/HW4_3.py","file_name":"HW4_3.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"57888943","text":"\"\"\"\niObject.py\n\nauthor: Andrew Paxson\n\nThis Module holds the lowest base class for the Ion Package.\n\n\"\"\"\nfrom threading import Lock\nfrom collections import OrderedDict\n\nclass iObject(object):\n\n _serial_count = 0\n\n def __init__(self, name=None, can_have_children=True, parent=None):\n \"\"\" iObject is the base class for the ion package which is used for managing film project data.\n\n iObject is a base class that implements some basic requirments for\n the sub class in the Ion package. 
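The pressure loop in HW4_3.py above has an indexing bug: `df.index[j-1]` treats the depth label `j` as a position, which fails (or silently mis-pairs rows) once depths are non-integer. A hedged rewrite of the hydrostatic integration that pairs consecutive depths positionally (the column layout is an assumption based on the script; surface pressure defaults to 0):

```python
import pandas as pd

def hydrostatic_pressure(rho, g=9.81, p0=0.0):
    """rho: density (kg/m^3) indexed by depth (m), shallowest row first."""
    depths = rho.index.to_numpy()
    p, out = p0, [p0]
    for k in range(1, len(depths)):
        dz = depths[k] - depths[k - 1]   # layer thickness from adjacent positions
        p += rho.iloc[k] * g * dz        # accumulate rho * g * dz over the layer
        out.append(p)
    return pd.Series(out, index=depths)

# hydrostatic_pressure(pd.Series([1025.0, 1026.5], index=[0.0, 10.0]))
```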
These include\n\n - a name for each object\n - tree like behavior\n - thread locking parameters.\n\n The resource locking should be utilized for all sub-class methods\n that manipulate or adjust data.\n\n\n Parameters\n ----------\n name : unicode\n unicode string name for the object\n\n can_have_children : bool\n whether the object can possess children underneath\n\n parent : iObject\n optional parent object to use a parent, the parent is\n automatically given the created object a child.\n\n \"\"\"\n if name is None:\n self.__name = u\"iObject{0}\".format(iObject._serial_count)\n iObject._serial_count += 1\n elif isinstance(name, unicode):\n self.__name = name\n else:\n raise TypeError(u\"{0:s} name argument must be a unicode type\".format(name))\n\n self.__parent = None\n self.__set_parent(parent)\n\n self.__lock = None\n self.__children = None\n\n if isinstance(can_have_children, bool):\n self.__can_have_children = can_have_children\n else:\n raise TypeError(\"argument 'can_have_children' must be a bool type!\")\n\n def name(self):\n \"\"\" Returns Name Attribute\n\n Returns\n -------\n unicode\n Returns Unicode name.\n\n \"\"\"\n return self.__name\n\n def parent(self):\n \"\"\" Returns parent object\n\n Returns\n -------\n iObject\n Returns a iObject or None\n \"\"\"\n return self.__parent\n\n def parents(self):\n \"\"\" Iterator Function that will climb up the parent tree\n\n Yields\n -------\n iObject\n \"\"\"\n if self.parent() is not None:\n yield self.parent()\n\n for p in self.parent().parents():\n yield p\n\n def get_lock(self):\n \"\"\" Returns a Lock Primitive the is stored in this object for integrated threading\n\n The get_lock() method is a used to retrieve a Lock Primitive ( from the\n threading module ) which provides a good approach for protecting this\n object's resources from other threads accessing this object's\n attributes. The \"__lock\" attribute is assigned the Lock primitive\n and this is returned. See threading.Lock()\n\n Examples\n -------\n This example demonstrates the use of a lock external but this can be\n used internalwith the object equally as well.\n\n >>> from Ion import iObject\n >>> o = iObject()\n >>> with o.get_lock():\n >>> pass # Do Stuff Here\n\n Returns\n -------\n threading.Lock\n Returns a threading.Lock primitive which can be used to protect\n the method's object's resource when threading. All internal\n methods should implement this lock in order to protect the\n objects resources and make the system thread safe.\n \"\"\"\n\n if self.__lock is None:\n self.__lock = Lock()\n return self.__lock\n\n def has_children(self):\n \"\"\" Returns whether the object has children objects\n\n Returns\n -------\n bool\n Returns True if there are children, False Otherwise\n\n \"\"\"\n if self.__children is not None:\n if len(self.__children) > 0:\n return True\n return False\n\n def children(self, depth=0):\n \"\"\" Yields each child to the depth specified in the tree\n\n This member function yields each child in the tree beneath it (\n object that have the calling object as a parent ) The depth can be\n used to limit the depth of recursion. The default of 0 means on the\n direct children will be yielded. 
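`get_lock()` below allocates the Lock lazily, but that allocation is itself unsynchronized: two threads racing on the first call can each construct and receive a different Lock, defeating the mutual exclusion it is meant to provide. A hedged variant that allocates eagerly while keeping the documented `with o.get_lock():` usage:

```python
from threading import Lock

class EagerLockMixin:
    """Allocate the lock in __init__, before any other thread can see the object."""
    def __init__(self):
        self.__lock = Lock()   # created once, eliminating the first-call race

    def get_lock(self):
        return self.__lock
```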
Setting the value to -1 will yield\n all children in the tree by recursing each object beneath.\n\n This member function also calls into the query_children() to populate the tree.\n\n Parameters\n ----------\n depth : int\n The level of recursion to traverse the children tree.\n\n Yields\n -------\n iObject\n The next child in the tree.\n\n \"\"\"\n if self.__can_have_children:\n if self.__children is None:\n if not self.__query_children():\n return\n\n if len(self.__children) == 0:\n return\n\n if depth < 0:\n depth = 10000\n for child in self.__children.itervalues():\n yield child\n if depth > 0:\n for item in child.children(depth-1):\n yield item\n\n def query_children(self):\n \"\"\" A member function that builds the list of children.\n\n This member function allows for sub-classes to perform lazy loads of\n the children. This member function runs whenever the children()\n operation is called giving an opportunity to lazy load the children.\n The default behavior does nothing.\n\n Returns\n -------\n OrderedDict\n\n \"\"\"\n if self.__children is None:\n self.__children = OrderedDict()\n return self.__children\n\n def num_children(self, depth=0):\n \"\"\" Counts the number of children at a certain depth of recursion.\n\n Returns\n -------\n int\n Number of children in a depth of n.\n \"\"\"\n counter = 0\n for child in self.children(depth):\n counter += 1\n return counter\n\n def clear(self):\n \"\"\" Clears the children attribute to None\n\n Returns\n -------\n None\n \"\"\"\n self.__children = None\n\n def __set_parent(self, parent):\n \"\"\" Private Member that sets the parent.\n\n Parameters\n ----------\n parent : iObject\n The object to use as the parent. It must be a instance ( of\n subclass) of the iObject\n\n\n Returns\n -------\n None\n \"\"\"\n if parent is None or isinstance(parent, iObject):\n if self.__parent is not None:\n del self.__parent.__children[self.__name]\n\n if parent is not None:\n parent.__add_child(self)\n\n self.__parent = parent\n else:\n raise TypeError(\"argument parent must be a subclass of ion.iObject\")\n\n def __add_child(self, child):\n \"\"\" Private Member that adds child to children attr\n\n Parameters\n ----------\n child : iObject\n An iObject to add as a child to the children.\n\n\n Returns\n -------\n None\n\n \"\"\"\n if self.__children is None:\n self.__children = OrderedDict()\n self.__children[child.name()] = child\n\n def __query_children(self):\n \"\"\" Private Member Function that performs the query_children() call\n and returns whether children where created.\n\n Returns\n -------\n bool\n Returns whether the query_children() method created children or not.\n \"\"\"\n self.query_children()\n\n if self.__children is None:\n return False\n else:\n return True\n\n def __has_child_by_name(self, name):\n \"\"\" Private Method that queries children tree for a name\n\n Parameters\n ----------\n name : unicode\n name to look for.\n\n Returns\n -------\n bool\n Returns True if the iObject has a child with the matching name,\n returns False otherwise\n \"\"\"\n result = False\n for child in self.children():\n if child.name() == name:\n result = True\n break\n return result\n\n def __contains__(self, item):\n \"\"\" Override of the contains method for \"item in self\"\n\n Parameters\n ----------\n item : iObject\n item to look for.\n\n Returns\n -------\n bool\n \"\"\"\n result = False\n if self.__children is not None:\n result = self.__has_child_by_name(item.name())\n return result\n\n def __iter__(self):\n \"\"\" Override the iterator 
method\n\n Yields\n -------\n iObject\n \"\"\"\n if self.has_children():\n for child in self.__children():\n yield child\n","sub_path":"Ion/iobject.py","file_name":"iobject.py","file_ext":"py","file_size_in_byte":8919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"19523035","text":"import requests\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport json\nimport os\nimport cv2\n\n# Url of the page source\nurl = 'http://www.ecoindia.com/flora/trees/'\n\n# Got the source code.\npage = requests.get(url)\n\n# BeautifulSoup Class\nsoup = BeautifulSoup(page.content, 'html.parser')\n\n# Total 9 tables are there on the webpage\ntree_html = soup.find_all('td', valign=\"TOP\", class_='')\n\n# Considering first 20\ntree_td = tree_html[:20]\ntrees = tree_html[:6]\n\n# Initializing empty lists\nname = []\nimage = []\nabout = []\n\n# Iterating over every tree\nfor tree in tree_td:\n # Getting names\n name.append(tree.find('a').get_text())\n\n # Downloading image\n IMG_URL = tree.find_all('img', align=\"MIDDLE\")[0].get('src')\n IMG_PATH = os.path.basename(IMG_URL)\n urllib.request.urlretrieve(IMG_URL, IMG_PATH)\n\n # Getting pixels\n image.append(cv2.cvtColor(cv2.imread(IMG_PATH), cv2.COLOR_BGR2RGB).tolist())\n\n # Deleting the image\n os.remove(IMG_PATH)\n\n # About each trees\n about.append(tree.find('div', align='JUSTIFY', class_=\"text-tbl\").get_text())\n\n# Creating the dictionary\ntrees = dict(name=name, image=image, about=about)\n\n# Creating a file from it\nwith open('trees.csv', 'a') as file:\n json.dump(trees, file)\n","sub_path":"dataset/treespecies/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"177211465","text":"\"\"\"\nCheck balanced\n Implement a function to check if a binary tree is balanced. 
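`__iter__` at the end of iObject above calls `self.__children()`, but `__children` is an `OrderedDict`, not a callable, so iterating an iObject raises `TypeError` as soon as it has children (the module also uses the Python 2-only `itervalues()` in `children()`). A self-contained sketch of the dict-backed child iteration the class appears to intend:

```python
from collections import OrderedDict

class Node:
    def __init__(self, name):
        self.name = name
        self._children = OrderedDict()

    def add(self, child):
        self._children[child.name] = child
        return child

    def __iter__(self):
        # Iterate the stored mapping's values; calling the dict itself raises TypeError.
        return iter(self._children.values())

root = Node("root")
root.add(Node("a")); root.add(Node("b"))
print([c.name for c in root])   # ['a', 'b']
```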
For the purposes of\n this question, a balanced tree is defined to be a tree such that the heights of the two subtrees\n of any node never differ by more than one.\n\"\"\"\nimport random\nfrom typing import List\n\nfrom data import TreeNode\n\n\ndef check_balanced(tree):\n return check_height(tree) > 0\n\n\ndef check_height(node):\n if not node:\n return -1\n\n sentinel_value = float(\"-inf\")\n\n left_height = check_height(node.left)\n if left_height == sentinel_value:\n return sentinel_value\n\n right_height = check_height(node.right)\n if right_height == sentinel_value:\n return sentinel_value\n\n if abs(left_height - right_height) > 1:\n return sentinel_value\n\n return max(left_height, right_height) + 1\n\n\ndef build_tree(array, start, end, unbalanced=True):\n if end < start:\n return\n\n mid = (start + end) // 2\n\n node = TreeNode(array[mid])\n node.left = build_tree(array, start, mid - 1, unbalanced)\n\n if random.randint(0, 10) > 5 or not unbalanced:\n node.right = build_tree(array, mid + 1, end, unbalanced)\n\n return node\n\n\nif __name__ == \"__main__\":\n tree_unbalanced = build_tree(list(range(1, 101)), 0, 99)\n tree_balanced = build_tree(list(range(1, 101)), 0, 99, unbalanced=False)\n print(\"Is balanced (unbalanced) ->\", check_balanced(tree_unbalanced))\n print(\"Is balanced (balanced) ->\", check_balanced(tree_balanced))\n","sub_path":"python/trees_graphs/question_4_4.py","file_name":"question_4_4.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"171961101","text":"from base.base_func import BaseService\nfrom .api_config import APINameList, APIConfig\nfrom settings import TEST_ENV\nfrom .request_response import * # Keep this, or [api_name + 'Request'] class wil not be found in globals()\n\n\nclass APIService(BaseService):\n\n @classmethod\n def call_api(cls, api_name=None, request_body=None, status_success=200, api_config_class=APIConfig, req_class=None, res_class=None):\n return super().call_api(req_class=globals()[api_name + 'Request'], res_class=globals()[api_name + 'Response'], api_config_class=api_config_class, env=TEST_ENV, api_name=api_name, request_body=request_body, status_success=status_success)\n\n @classmethod\n def authorization(cls, request_body=None):\n request_body = AuthorizationRequest.get_default_body() if request_body is None else request_body\n api_name = APINameList.Authorization\n ret, res = cls.call_api(api_name=api_name, request_body=request_body)\n return ret, res\n\n @classmethod\n def compare(cls, request_body=None):\n request_body = CompareRequest.get_default_body() if request_body is None else request_body\n api_name = APINameList.Compare\n ret, res = cls.call_api(api_name=api_name, request_body=request_body)\n return ret, res\n\n","sub_path":"project/example/services/api_service.py","file_name":"api_service.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"407090256","text":"#爬取豆瓣Top250电影的名字和评分\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\ndef getHTMLText(url):\r\n r = requests.get(url, timeout=30)\r\n r.raise_for_status()\r\n r.encoding = r.apparent_encoding\r\n return r.text\r\n\r\ndef parsePage(html, list):\r\n soup = BeautifulSoup(html, 'html.parser')\r\n for titles in soup.find(class_='article').find_all(class_='info'):\r\n for title in titles.find(class_='title'):\r\n # print(title)\r\n list.append(title)\r\n for rate in 
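`APIService.call_api` above resolves request/response classes with `globals()[api_name + 'Request']`, which works but couples dispatch to the star import at the top of the module. A hedged sketch of the same name-based dispatch through an explicit registry, which is easier to audit (the class names here are illustrative):

```python
REGISTRY = {}

def register(cls):
    """Class decorator: index request/response classes by their name."""
    REGISTRY[cls.__name__] = cls
    return cls

@register
class CompareRequest:
    pass

@register
class CompareResponse:
    pass

def resolve(api_name, kind):
    return REGISTRY[api_name + kind]   # e.g. resolve('Compare', 'Request')

assert resolve("Compare", "Request") is CompareRequest
```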
titles.find(class_='rating_num'):\r\n # print(rate)\r\n list.append(rate)\r\n return list\r\n\r\ndef printTitle(list):\r\n form = '{0:^4}{1:{3}^16}{2:^6}'\r\n print(form.format('排名', '影片名称', '评分', chr(12288)))\r\n count = 0\r\n for i in range(250):\r\n count += 1\r\n print(form.format(count, list[i*2], list[i*2+1], chr(12288)))\r\n\r\ndef main():\r\n depth = 10\r\n start_url = 'https://movie.douban.com/top250'\r\n titleList = []\r\n for i in range(depth):\r\n try:\r\n url = start_url+'?start='+str(25*i)\r\n # print(url)\r\n html = getHTMLText(url)\r\n parsePage(html,titleList)\r\n except:\r\n continue\r\n printTitle(titleList)\r\nmain()","sub_path":"Mooc/week3/crawlDouBan2.py","file_name":"crawlDouBan2.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"505938050","text":"#!/usr/bin/env python3\n\nimport random\n\ndef parse_file(filename):\n for line in open(filename):\n _,name,_,_=line.strip().split(\",\")\n yield name\n\ndef make_groups(names,size=6):\n random.shuffle(names)\n groups=[[]]\n for index,name in enumerate(names):\n groups[-1].append(name)\n if index%size==size-1:\n groups.append([])\n return groups\n\ndef search(name, groups):\n for group in groups:\n for member in group:\n if name.lower() in member.lower():\n return group\n\ndef pretty_print(groups):\n for index,group in enumerate(groups):\n number=index+1\n print(\"Group \", number)\n for member in group:\n print(member)\n print(\"\")\n\ndef main(filename):\n names=[i for i in parse_file(filename)]\n groups=make_groups(names)\n pretty_print(groups)\n\nif __name__==\"__main__\":\n from sys import argv\n main(argv[-1])\n","sub_path":"Code Snippets/class_shuffle.py","file_name":"class_shuffle.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"654058465","text":"import argparse\r\nimport urllib\r\nimport cv2\r\nimport dlib\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom dlib.python_examples.cnn_face_detector import rects\r\nfrom imutils import face_utils\r\n\r\nfrom getImages import image\r\n\r\nfrontalface_detector = dlib.get_frontal_face_detector()\r\n\r\nap = argparse.ArgumentParser()\r\nargs = vars(ap.parse_args())\r\npredictor = dlib.shape_predictor(args[\"shape_predictor\"])\r\n\r\n\r\ndef rect_to_bb(rect):\r\n x = rect.left()\r\n y = rect.top()\r\n w = rect.right() - x\r\n h = rect.bottom() - y\r\n return x, y, w, h\r\n\r\n\r\ndef shape_to_np(shape, dtype=\"int\"):\r\n coords = np.zeros((68, 2), dtype=dtype)\r\n\r\n for i in range(0, 68):\r\n coords[i] = (shape.part(i).x, shape.part(i).y)\r\n\r\n return coords\r\n\r\n\r\ndef detect_face(image_url):\r\n try:\r\n url_response = urllib.request.urlopen(image_url)\r\n img_array = np.array(bytearray(url_response.read()), dtype=np.uint8)\r\n image = cv2.imdecode(img_array, -1)\r\n rects = frontalface_detector(image, 1)\r\n if len(rects) < 1:\r\n return \"No Face Detected\"\r\n finally:\r\n return \"Face detected successfully\"\r\n\r\n\r\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\nfor (i, rect) in enumerate(rects):\r\n shape = predictor(gray, rect)\r\n shape = face_utils.shape_to_np(shape)\r\n (x, y, w, h) = rect_to_bb(rect)\r\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n plt.imshow(image, interpolation='nearest')\r\n\r\n for (x, y) in shape:\r\n cv2.circle(image, (x, y), 1, (0, 0, 255), 
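`make_groups` in class_shuffle.py above fills `groups[-1]` while watching `index % size`, and leaves a trailing empty group whenever `len(names)` is an exact multiple of `size`. Slicing expresses the same chunking without either wrinkle; a sketch that keeps the shuffle:

```python
import random

def make_groups(names, size=6):
    names = names[:]                 # shuffle a copy, not the caller's list
    random.shuffle(names)
    return [names[i:i + size] for i in range(0, len(names), size)]

print(make_groups([f"student{i}" for i in range(12)]))  # two groups of 6, no empty tail
```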
-1)\r\n\r\nplt.axis('off')\r\nplt.show()\r\n","sub_path":"faceDetection1.py","file_name":"faceDetection1.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"585395605","text":"# -*- coding: utf-8 -*-\n\nimport random\nimport logging\nimport os\n\nfrom cookielib import CookieJar as cj\n\nVERSION = '0.0.3'\n\nPARENT_DIR = os.path.dirname(os.path.abspath(__file__))\n\nPOP_URLS_FILEN = os.path.join(PARENT_DIR, 'data/popular_sources.txt')\nUSERAGENTS_FN = os.path.join(PARENT_DIR, 'data/useragents.txt')\nSTOPWORDS_EN_FN = os.path.join(PARENT_DIR, 'data/stopwords_en.txt')\nSTOPWORDS_EN_FN_2 = os.path.join(PARENT_DIR, 'data/stopwords_en2.txt')\n\nDATA_DIR = '.newspaper_scraper'\n\nTOPDIR = os.path.join(os.path.expanduser(\"~\"), DATA_DIR)\nif not os.path.exists(TOPDIR):\n os.mkdir(TOPDIR)\n\n# Error log\nLOGFILE = os.path.join(TOPDIR, 'newspaper_errors_%s.log' % VERSION)\nM_LOGFILE = os.path.join(TOPDIR, 'newspaper_monitors_%s.log' % VERSION)\n\n# Memo directory (same for all concur crawlers)\nMEMO_FILE = 'memoized'\nMEMODIR = os.path.join(TOPDIR, MEMO_FILE)\n\nif not os.path.exists(MEMODIR):\n os.mkdir(MEMODIR)\n\n# category and feed cache\nCF_CACHE_DIR = 'feed_category_cache'\nANCHOR_DIR = os.path.join(TOPDIR, CF_CACHE_DIR)\n\nif not os.path.exists(ANCHOR_DIR):\n os.mkdir(ANCHOR_DIR)\n\nUSERAGENT = 'newspaper/%s' % VERSION\n\nTRENDING_URL = 'http://www.google.com/trends/hottrends/atom/feed?pn=p1'\n\nMAX_FILE_MEMO = 20000\n\n\n","sub_path":"newspaper/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"69861239","text":"import feedparser\nimport os\nimport boto3\nimport tempfile\nimport urllib.request\nimport logging as LOGGER\nimport json\n\n__location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\n# AWS Clients\nPRE_PROCESSED_KEY = \"raw/\"\nPOST_PROCESSED_KEY = \"processed/\"\nSCRATCH_LOCATION_KEY = \"scratch/\"\nTRANSCRIBE_CLIENT = boto3.client('transcribe')\nS3_CLIENT = boto3.client('s3')\nENVIRONMENT_VARIABLES = [\"S3_BUCKET\", \"REGION\", \"TRANSCRIPTION_JOB_PREFIX\", \"PODCAST_NAME\", \"PODCAST_FEED\"]\n\n\ndef process_podcasts():\n LOGGER.info(\"Processing Podcasts from RSS Feed\")\n [trigger_transcription(podcast) for podcast in get_unprocessed_podcasts()]\n\n\ndef move_transcription_output(transcription_job_name):\n s3_key = PRE_PROCESSED_KEY + transcription_job_name[len(os.environ[\"TRANSCRIPTION_JOB_PREFIX\"]):] + \".json\"\n LOGGER.info(\"Uploading transcription output to: '\" + s3_key + \"'\")\n\n job = TRANSCRIBE_CLIENT.get_transcription_job(TranscriptionJobName=transcription_job_name)\n if job[\"TranscriptionJob\"][\"TranscriptionJobStatus\"] == \"COMPLETED\":\n upload_file_to_s3(\n url=job[\"TranscriptionJob\"][\"Transcript\"][\"TranscriptFileUri\"],\n key=s3_key\n )\n\ndef process_all_podcasts():\n\n for file_key in get_filelist_from_s3():\n obj = boto3.resource('s3').Object(os.environ[\"S3_BUCKET\"], file_key)\n\n boto3.resource('s3').Object(\n os.environ[\"S3_BUCKET\"],\n POST_PROCESSED_KEY + file_key[len(PRE_PROCESSED_KEY):] # Change Key to correct target\n ).put(\n Body=json.dumps(profanity_processor(json.load(obj.get()['Body'].read())))\n )\n\n\ndef profanity_processor(transcript):\n\n LOGGER.info(\"Processing Profanities\")\n\n with open(os.path.join(__location__, 'swearwords.json')) as file:\n 
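settings.py above repeats the `if not os.path.exists(d): os.mkdir(d)` pair for each cache directory. `os.makedirs(..., exist_ok=True)` covers the existence check and any missing parent directories in one call; a sketch:

```python
import os

def ensure_dirs(*paths):
    for p in paths:
        os.makedirs(p, exist_ok=True)   # no-op if the directory already exists

# ensure_dirs(TOPDIR, MEMODIR, ANCHOR_DIR)  # names from the settings module above
```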
profanity_dictionary = {word: 0 for word in json.load(file)}\n\n for word in transcript[\"results\"][\"transcripts\"][0][\"transcript\"].split(\" \"):\n if word in profanity_dictionary.keys():\n profanity_dictionary[word] = profanity_dictionary[word] + 1\n\n # Strip any swearwords that aren't instantiated\n return {word: profanity_dictionary[word] for word in profanity_dictionary if profanity_dictionary[word] > 0}\n\n\n\n\ndef trigger_transcription(podcast):\n upload_file_to_s3(url=podcast[\"url\"], key=podcast[\"s3_key\"])\n\n s3_url = \"https://s3-\" + os.environ[\"REGION\"] + \".amazonaws.com/\" + os.environ[\"S3_BUCKET\"] + \"/\" + podcast[\"s3_key\"]\n transcription_job_name = os.environ[\"TRANSCRIPTION_JOB_PREFIX\"] + podcast[\"name\"]\n\n LOGGER.info(\"Triggering transcription Job: \" + transcription_job_name)\n return TRANSCRIBE_CLIENT.start_transcription_job(\n TranscriptionJobName=transcription_job_name,\n LanguageCode=\"en-AU\",\n MediaFormat=get_and_validate_filetype(s3_url),\n Media={'MediaFileUri': s3_url}\n )\n\n\ndef get_podcast_name(podcast):\n return podcast.media_content[0]['url'].split(\"/\")[-1].split(\".\")[0]\n\n\ndef get_podcast_s3_key(podcast):\n return SCRATCH_LOCATION_KEY + podcast.media_content[0]['url'].split(\"/\")[-1]\n\n\ndef get_unprocessed_podcasts():\n # Searches the RSS feed for latest podcasts, and uploads them to S3 for Processing\n return [\n {\n \"name\": get_podcast_name(podcast),\n \"s3_key\": get_podcast_s3_key(podcast),\n \"url\": podcast.media_content[0]['url']\n }\n for podcast in feedparser.parse(os.environ[\"PODCAST_FEED\"]).entries\n if PRE_PROCESSED_KEY + get_podcast_name(podcast) + \".json\"\n not in get_filelist_from_s3(PRE_PROCESSED_KEY)\n ]\n\n\ndef check_environment_variables(environment_variables):\n missing_variables = []\n for variable in environment_variables:\n if variable not in os.environ:\n missing_variables.append(variable)\n\n if len(missing_variables) > 0:\n raise LookupError(\"The following environment variables have not been set: '\" +\n ', '.join(missing_variables) + \"'\")\n\n\ndef get_and_validate_filetype(s3_file_location):\n for file_type in ['mp3', 'mp4', 'wav', 'flac']:\n if s3_file_location.endswith(file_type):\n return file_type\n\n raise Exception(\"InvalidFileType\")\n\n\ndef get_filelist_from_s3(prefix):\n\n response = S3_CLIENT.list_objects_v2(\n Bucket=os.environ[\"S3_BUCKET\"],\n Prefix=prefix\n )\n\n if response[\"KeyCount\"] == 0:\n return []\n\n return [file[\"Key\"] for file in response[\"Contents\"]]\n\n\ndef upload_file_to_s3(url, key):\n\n LOGGER.info(\"Uploading file to S3: \" + url)\n with tempfile.NamedTemporaryFile() as temporary_file:\n urllib.request.urlretrieve(url, temporary_file.name)\n return S3_CLIENT.upload_file(temporary_file.name, os.environ[\"S3_BUCKET\"], key)\n\n\ncheck_environment_variables(ENVIRONMENT_VARIABLES)\n\n","sub_path":"processor/src/swearjar.py","file_name":"swearjar.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"451936082","text":"# -*- coding: utf-8 -*-\n\nway = 16\nangle = RAD * 50\n\nmatrices = Matrix3(1, 0, 0, 0, 1, 0, 0, 0, 1), Matrix3(0, 1, 0, 0, 0, 1, 1, 0, 0), Matrix3(0, 0, 1, 1, 0, 0, 0, 1, 0)\n\nvec_axis_list = []\nfor i in range(way):\n\tvec = -Vector3.UnitZ * Matrix3.RotationX(angle) * Matrix3.RotationY(RAD * (360.0 / way) * i)\n\taxis = cross2(vec, Vector3.UnitY)\n\tvec *= Matrix3.RotationAxis(axis, RAD * (360.0 / way) * -i)\n\t\n\tfor mat in 
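`profanity_processor` above seeds a dict of zeros, increments matches, then filters the zeros back out. `collections.Counter` does the counting and keeps only observed words in one pass; a hedged equivalent (input shapes assumed from the surrounding code):

```python
from collections import Counter

def count_profanities(transcript_text, swearwords):
    vocabulary = set(swearwords)
    counts = Counter(w for w in transcript_text.split(" ") if w in vocabulary)
    return dict(counts)   # only words that actually occurred appear as keys

print(count_profanities("oh heck oh darn heck", ["heck", "darn"]))  # {'heck': 2, 'darn': 1}
```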
matrices[0:1]:\n\t\tvec_axis_list.append((vec * mat, axis * mat))\n\ndef task(vec, axis, veclist = objvertices(\"ico_y.obj\", 0)):\n\tdef shot_s(binder = [vec], rot = Quaternion.RotationAxis(axis, RAD * 10)):\n\t\ttrans_mat = Matrix3(1, 0, 0, 0, 0, 1, 0, 1, 0) * Matrix3.LookAt(-binder[0], Vector3.UnitY)\n\t\t\n\t\tfor v in veclist:\n\t\t\tv *= trans_mat\n\t\t\t\n\t\t\tif v * binder[0] <= 0: continue\n\t\t\t\n\t\t\tshot = EntityShotStraight(WORLD, \"S\", 0xFF0000)\n\t\t\tshot.Pos = binder[0] * 160\n\t\t\tshot.Velocity = v * 4\n\t\t\tshot.LifeSpan = 200\n\t\t\tshot.Spawn()\n\t\t\n\t\tbinder[0] *= rot\n\tWORLD.AddTask(shot_s, 2, 200, 0)\n\nfor vec, axis in vec_axis_list:\n\ttask(vec, axis)\n\ttask(vec, -axis)\n\ndef laser_task(pos):\n\tparent = EntityShot(WORLD, \"M\", 0xFF0000)\n\tparent.Pos = pos\n\t\n\tdef short_laser():\n\t\tshot = EntityShotStraight(WORLD, \"DIA\", 0xFF0000, Vector3(2, 2, 16))\n\t\tshot.Pos = parent.Pos\n\t\tshot.Velocity = normalize(TARGET_BONE.WorldPos - shot.Pos) * 12\n\t\tshot.LifeSpan = 120\n\t\tshot.Spawn()\n\tparent.AddTask(short_laser, 20, 10, 60)\n\t\n\tdef long_laser():\n\t\tshot = EntityShot(WORLD, \"LASER_LINE\", 0xFF0000, Vector3(5, 5, 4000))\n\t\tshot.Pos = parent.Pos\n\t\tshot.LookAtVec = normalize(TARGET_BONE.WorldPos - shot.Pos) * 12\n\t\tshot.LifeSpan = 35\n\t\t\n\t\tmorph = shot.CreateVertexMorph(0, lambda v: Vector3(v.x * -0.99, v.y * -0.99, 0))\n\t\tshot.AddMorphKeyFrame(morph, 1, 0)\n\t\tshot.AddMorphKeyFrame(morph, 0, 15)\n\t\t\n\t\tshot.Spawn()\n\tparent.AddTask(long_laser, 60, 5, 90)\n\tparent.Spawn()\n\nfor pos in [Vector3(x * 100, y * 100, 0) for x in [1, -1] for y in [1, -1]]:\n\tlaser_task(pos)\n","sub_path":"Th08-東方永夜抄/Stage6B/神宝「サラマンダーシールド」.py","file_name":"神宝「サラマンダーシールド」.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"402775411","text":"import ast\n\nimport sap.compiler_base\n\n\nclass BaseError(Exception):\n def __init__(self, *args):\n raise NotImplementedError(\n 'BaseError cannot be initialized - use SemanticError instead')\n\n def make_message(self, compiler, node):\n prefix, suffix = self.prefix_suffix(compiler, node)\n self.message = prefix + self.msg + suffix\n return\n\n def prefix_suffix(self, compiler, node):\n if (compiler is None) or (not hasattr(compiler, 'function')):\n return '', ''\n try:\n fname = compiler.function.func_name\n except:\n return '', ''\n\n if node is None:\n return '%s: ' % fname, ''\n lineno = getattr(node, 'lineno', None)\n col_offset = getattr(node, 'col_offset', 0)\n\n prefix = '%s: ' % fname\n suffix = ''\n if lineno is not None:\n try:\n prefix = '\\n%s:%d:%s: ' % (compiler.filename, lineno, fname)\n except:\n prefix = '%s: ' % fname\n try:\n suffix = '\\n' + \\\n [x for x in open(compiler.filename)\n ][lineno - 1] + (' ' * col_offset + '^')\n except:\n pass\n return prefix, suffix\n\n\nclass SemanticError(BaseError):\n def __init__(self, compiler, node, msg):\n if hasattr(self, 'msg'):\n self.msg += msg\n else:\n self.msg = msg\n self.make_message(compiler, node)\n return\n\n\nclass ArityError(SemanticError):\n def __init__(self, compiler, node, actual, expected):\n assert actual != expected, 'Trying to throw an ArityError where actual is equal to expected'\n SemanticError.__init__(\n self, compiler, node, 'Expected arity of %d, actual arity of %d' % (expected, actual))\n return\n\n\nclass AssignmentArityError(SemanticError):\n def __init__(self, compiler, node, lhs, rhs):\n assert len(lhs) != len(\n 
rhs), 'Trying to throw an AssignmentArityError where lhs and rhs have same len'\n assert isinstance(\n node, ast.Assign), 'node passed into AssignmentArityError must be of type ast.Assign'\n SemanticError.__init__(\n self, compiler, node, 'lhs has arity %d, rhs has arity %d' % (len(lhs), len(rhs)))\n return\n\n\nclass NotSupportedError(SemanticError):\n def __init__(self, compiler, node):\n SemanticError.__init__(self, compiler, node, 'Not supported in SAP')\n return\n\n\nclass SymbolTableError(SemanticError):\n def __init__(self, compiler, node):\n if hasattr(compiler, 'symtab'):\n if hasattr(self, 'msg'):\n self.msg += '\\nSymbol table has %d levels' % compiler.symtab.num_levels\n else:\n self.msg = '\\nSymbol table has %d levels' % compiler.symtab.num_levels\n SemanticError.__init__(self, compiler, node, '')\n return\n\n\nclass SingleAssignmentError(SymbolTableError):\n def __init__(self, compiler, node):\n self.msg = 'Single Assignment Violation, %s exists' % node.id\n if hasattr(compiler, 'symtab'):\n self.msg += ' in level %d of the symbol table' % compiler.symtab.find(\n node.id)\n SymbolTableError.__init__(self, compiler, node)\n return\n\n\nclass SymbolLookupError(SymbolTableError):\n def __init__(self, compiler, node):\n self.msg = 'Name %s not found in symbol table' % node.id\n SymbolTableError.__init__(self, compiler, node)\n return\n\n\nclass PlaceholderError(SymbolTableError):\n def __init__(self, compiler, node):\n self.msg = 'Name %s is assigned to a placeholder value' % node.id\n SymbolTableError.__init__(self, compiler, node)\n return\n\n\nclass SymbolNameError(SymbolTableError):\n def __init__(self, compiler, node):\n self.msg = 'LHS must be a name'\n SymbolTableError.__init__(self, compiler, node)\n return\n\n\nclass SAPTypeError(SemanticError):\n def __init__(self, compiler, node):\n SemanticError.__init__(self, compiler, node, 'Types do not match')\n return\n\n\nclass CompilerError(object):\n \"\"\"A class which contains methods for Compiler error calls\"\"\"\n\n def __init__(self, compiler):\n assert isinstance(\n compiler, sap.compiler_base.CompilerBase), 'Compiler passed in to CompilerError is not valid'\n self.compiler = compiler\n return\n\n def semantic_error(self, node, msg):\n raise SemanticError(self.compiler, node, msg)\n\n def arity_error(self, node, actual, expected):\n raise ArityError(self.compiler, node, actual, expected)\n\n def assignment_arity_error(self, node, lhs, rhs):\n raise AssignmentArityError(self.compiler, node, lhs, rhs)\n\n def not_supported_error(self, node):\n raise NotSupportedError(self.compiler, node)\n\n def symbol_table_error(self, node):\n raise SymbolTableError(self.compiler, node)\n\n def single_assignment_error(self, node):\n raise SingleAssignmentError(self.compiler, node)\n\n def symbol_lookup_error(self, node):\n raise SymbolLookupError(self.compiler, node)\n\n def placeholder_error(self, node):\n raise PlaceholderError(self.compiler, node)\n\n def symbol_name_error(self, node):\n raise SymbolNameError(self.compiler, node)\n\n def type_error(self, node):\n raise SAPTypeError(self.compiler, node)\n","sub_path":"sap/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"127029032","text":"import tkinter\nfrom tkinter import filedialog\n\nclass lastStep(tkinter.Frame):\n def __init__(self, master, configuration):\n super().__init__(master)\n self.configuration = configuration\n\n self.enteringFrame = 
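`prefix_suffix` above assembles compiler-style diagnostics: `file:line:function:` before the message, then the offending source line with a caret pinned under the column. That caret rendering stands alone well; a self-contained sketch of the formatting (no compiler object needed):

```python
def render_diagnostic(filename, lineno, col_offset, func, msg, source_lines):
    """Format `file:line:func: msg` with the source line and a column caret."""
    prefix = "\n%s:%d:%s: " % (filename, lineno, func)
    line = source_lines[lineno - 1].rstrip("\n")
    suffix = "\n" + line + "\n" + " " * col_offset + "^"
    return prefix + msg + suffix

print(render_diagnostic("m.py", 1, 4, "f", "bad name", ["x = yy + 1\n"]))
```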
tkinter.Frame(self);\n\n self.enteringLabel = tkinter.Label(self.enteringFrame, text=\"Dateipfad zum gewünschten Speicherort:\")\n self.enteringLabel.pack(side=tkinter.TOP, anchor=\"w\")\n\n self.entryForFilepath = tkinter.Entry(self.enteringFrame, width=50)\n self.entryForFilepath.pack(side=tkinter.LEFT)\n\n self.buttonForFolderFinder = tkinter.Button(self.enteringFrame, text=\"...\", width=3, command=self.getFilepath)\n self.buttonForFolderFinder.pack(side=tkinter.RIGHT)\n\n self.enteringFrame.pack(anchor=\"w\")\n\n self.filenameFrame = tkinter.Frame(self)\n\n self.filenameLabel = tkinter.Label(self.filenameFrame, text=\"Vorlagenname:\")\n self.filenameLabel.pack(side=tkinter.TOP, anchor=\"w\")\n\n self.filenameEntry = tkinter.Entry(self.filenameFrame, width=50)\n self.filenameEntry.pack(side=tkinter.LEFT)\n\n self.filenameFrame.pack(anchor=\"w\")\n\n def getFilepath(self):\n temp = filedialog.askdirectory(title = \"Dateipfad auswählen\", initialdir=\".\", mustexist = True)\n self.entryForFilepath.delete(0, tkinter.END)\n self.entryForFilepath.insert(0, temp)\n \n\n","sub_path":"Developement/GB2/WizardLastStep.py","file_name":"WizardLastStep.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"8254215","text":"import json\nimport socket\nimport threading\n\nimport conf\nfrom model.logger import logging, setup_logging\n\nsetup_logging()\n\"\"\"\n平台的 socket 发送模块\n\"\"\"\n\nMAX_CONNECTIONS = 99\n\nsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nip = conf.params['platform_ip']\nport = conf.params['platform_port']\nconnections = {}\n\n\ndef start():\n # 进行 socket 绑定\n socket.bind((ip, port))\n logging.info('[Platform Server] 启动')\n print('[Platform Server] 启动')\n socket.listen(MAX_CONNECTIONS)\n while True:\n connection, address = socket.accept()\n logging.info('[Platform Server] 收到一个新连接')\n print('[Platform Server] 收到一个新连接', connection.getsockname(),\n connection.fileno())\n try:\n buffer = connection.recv(1024).decode()\n obj = json.loads(buffer)\n device_id = 'platform' if obj[\n 'type'] == 'platform' else gen_device_id(obj['type'])\n connections[device_id] = connection\n connection.send(json.dumps({'ok': True, 'id': device_id}).encode())\n thread = threading.Thread(target=device_thread, args=(device_id, ))\n thread.start()\n except Exception:\n logging.warning(\n '[Platform Server] 无法接受数据: %s' % connection.getsockname())\n print('[Platform Server] 无法接受数据:', connection.getsockname(),\n connection.fileno())\n\n\ndef gen_device_id(device_type):\n # 生成设备 id\n return device_type + str(len(connections))\n\n\ndef device_thread(device_id):\n connection = connections[device_id]\n logging.info('[Platform Server] 设备 %s 已连接' % device_id)\n print('[Platform Server] 设备', device_id, '已连接')\n while True:\n try:\n buffer = connection.recv(1024).decode()\n # 解析成json数据\n obj = json.loads(buffer)\n if obj.get('target'):\n send(device_id, obj)\n else:\n logging.warning('[Platform Server] 无法解析json数据包: %s' %\n connection.getsockname())\n print('[Platform Server] 无法解析json数据包:',\n connection.getsockname(), connection.fileno())\n except Exception:\n logging.warning(\n '[Platform Server] 连接失效: %s' % connection.getsockname())\n print('[Platform Server] 连接失效:', connection.getsockname(),\n connection.fileno())\n break\n logging.info('[Platform Server] 设备 %s 已结束连接' % device_id)\n print('[Platform Server] 设备', device_id, '已结束连接')\n\n\ndef send(device_id, obj):\n target_id = obj['target']\n logging.info(\n 
'[Platform Server] 收到 %s 发送给 %s 的消息:%s' % (device_id, target_id, obj))\n print('[Platform Server] 收到', device_id, '发送给', target_id, '的消息:', obj)\n obj['device_id'] = device_id\n connections[target_id].send(json.dumps(obj).encode())\n","sub_path":"src/network/platform/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"591630385","text":"# coding=utf-8\nimport re\nimport urllib2\nfrom bs4 import BeautifulSoup\n\nBUSCA_CEP_SERVICE_BASE_URL = \"http://www.buscacep.correios.com.br/servicos/dnec/consultaEnderecoAction.do?relaxation={}&TipoCep=ALL&semelhante=S&cfm=1&Metodo=listaLogradouro&TipoConsulta=relaxation\"\n\n\nclass Cep:\n \"\"\"\n Estrutura de dado para armazenar CEPs\n \"\"\"\n def __init__(self, numero, logradouro, bairro, localidade, uf):\n self.numero = numero\n self.logradouro = logradouro\n self.bairro = bairro\n self.localidade = localidade\n self.uf = uf\n\n def __repr__(self):\n import json\n return json.dumps(self.__dict__)\n\n\ndef busca(query):\n \"\"\"\n Invoca o serviço buscacep do site dos correios, e extrai do HTML retornado a lista de ceps encontrados\n :param query: pode ser um numero de cep ou um logradouro\n :return: lista de Ceps encontrados\n \"\"\"\n content = urllib2.urlopen(BUSCA_CEP_SERVICE_BASE_URL.format(query)).read()\n soup = BeautifulSoup(content)\n trs = soup.findAll('tr', onclick=re.compile('javascript:detalharCep.*'))\n cep_list = []\n for tr in trs:\n td = tr.find_all('td')\n cep_list.append(Cep(\n numero=td[4].string,\n logradouro=td[0].string,\n bairro=td[1].string,\n localidade=td[2].string,\n uf=td[3].string\n ))\n\n return cep_list","sub_path":"buscacep/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"284140202","text":"import os\n\nimport numpy as np\nimport pandas as pd\n\nfrom trips_count_predictor.config.config import data_paths_dict\n\n\nclass MinneapolisLoader:\n\n\tdef __init__(self):\n\n\t\tself.city = \"Minneapolis\"\n\t\tself.provider = \"city_of_minneapolis\"\n\t\tself.months_dict = {\n\t\t\t5: \"May\",\n\t\t\t6: \"June\",\n\t\t\t7: \"July\",\n\t\t\t8: \"August\",\n\t\t}\n\t\tself.raw_trips_data_path = os.path.join(\n\t\t\tdata_paths_dict[\"raw_trips_data\"],\n\t\t\tself.city\n\t\t)\n\t\tself.raw_weather_data_path = os.path.join(\n\t\t\tdata_paths_dict[\"raw_weather_data\"],\n\t\t\tself.city\n\t\t)\n\n\tdef load_raw_trips_data(self, year, month):\n\t\tmonth = self.months_dict[month]\n\t\tyear = str(year)\n\t\tdata_path = os.path.join(\n\t\t\tself.raw_trips_data_path,\n\t\t\tself.provider,\n\t\t\t\"_\".join([\"Motorized\", \"Foot\", \"Scooter\", \"Trips\", month, year]) + \".csv\"\n\t\t)\n\t\tdf = pd.read_csv(data_path, parse_dates=[3, 4])\n\t\tdf.StartTime = df.StartTime.dt.tz_convert('America/Chicago')\n\t\tdf.EndTime = df.EndTime.dt.tz_convert('America/Chicago')\n\t\treturn df\n","sub_path":"trips_count_predictor/city_loader/provider_loaders/minneapolis.py","file_name":"minneapolis.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"157031632","text":"from PIL import Image\nfrom PIL.ImageOps import grayscale, colorize\nfrom PIL.ImageChops import offset\n\n\n# Apply colour to image\ndef tintImage(img, tint):\n i = colorize(grayscale(img), (0, 0, 0), tint)\n 
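The platform server above assumes each `connection.recv(1024)` yields exactly one complete JSON document, but TCP is a byte stream: messages can arrive split across reads or concatenated into one. A hedged sketch of newline-delimited framing that buffers until a full line is available (the wire format is an assumption; the script itself defines none):

```python
import json

def recv_json_lines(connection):
    """Yield one decoded object per '\\n'-terminated JSON line on the socket."""
    buffer = b""
    while True:
        chunk = connection.recv(1024)
        if not chunk:                      # peer closed the connection
            return
        buffer += chunk
        while b"\n" in buffer:
            line, buffer = buffer.split(b"\n", 1)
            if line:
                yield json.loads(line.decode())
```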
i.putalpha(img.split()[3])\n return i\n\n\n# Crops sprite from Spritesheet\ndef cropImg(img, location, defaultSize=(16, 16), objectSize=(16, 16), resize=False, displacement=(0, 0)):\n row = int(img.width / (defaultSize[0]))\n x = (location % row) * defaultSize[0]\n y = (location // row) * defaultSize[1]\n image = offset(img, -x, -y).crop((0, 0, objectSize[0], objectSize[1]))\n\n if resize:\n base = Image.new(\"RGBA\", (16, 32), (0, 0, 0, 0))\n base.paste(image, displacement, image)\n image = base\n return image\n\n\n# Paints a square a given colour\ndef colourBox(x, y, colour, pixels, scale=8):\n for i in range(scale):\n for j in range(scale):\n try:\n pixels[x*scale + i, y*scale + j] = colour\n except IndexError:\n pass\n return pixels\n","sub_path":"sdv/imagegeneration/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"235235166","text":"import os\nimport unittest\n\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\nfrom main import create_app, db \nfrom main.model import User , Post\n\napp = create_app(os.getenv('BOILERPLATE_ENV') or 'dev')\n\napp.app_context().push()\n\nmanager = Manager(app)\n\nmigrate = Migrate(app, db)\n\nmanager.add_command('db', MigrateCommand)\n\n@manager.command\ndef run():\n app.run()\n\n@manager.command\ndef test():\n \"\"\"Runs the unit tests.\"\"\"\n tests = unittest.TestLoader().discover('app/test', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n return 1\n\n@manager.command\ndef seed():\n \"\"\"Add seed data to the database.\"\"\"\n db.create_all()\n john = User(username='Lony Das')\n post = Post()\n post.title = \"Story time at Libray\"\n post.body = \"This is the first post\"\n post.author = john\n db.session.add(post)\n db.session.add(john)\n db.session.commit()\n print(User.query.all())\n print(Post.query.all())\n\nif __name__ == '__main__':\n manager.run()","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"608721413","text":"def _current_cask_is_installed(self):\n if (not self.valid_cask(self.current_cask)):\n self.failed = True\n self.message = 'Invalid cask: {0}.'.format(self.current_cask)\n raise HomebrewCaskException(self.message)\n cmd = ['{brew_path}'.format(brew_path=self.brew_path), 'cask', 'list', self.current_cask]\n (rc, out, err) = self.module.run_command(cmd)\n if re.search('Error: Cask .* is not installed.', err):\n return False\n else:\n return True","sub_path":"Data Set/bug-fixing-4/404b7140b61ba99591ff10cfa108ebf689fe4f5b-<_current_cask_is_installed>-bug.py","file_name":"404b7140b61ba99591ff10cfa108ebf689fe4f5b-<_current_cask_is_installed>-bug.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"253107533","text":"from contextlib import contextmanager\n\nimport pytest\nimport requests_mock\n\n\nclass ContextAdapter(requests_mock.Adapter):\n \"\"\"\n requests_mock adapter where ``register_uri`` returns a context manager\n \"\"\"\n @contextmanager\n def register_uri(self, *args, **kwargs):\n matcher = super().register_uri(*args, **kwargs)\n\n yield matcher\n\n self.remove_matcher(matcher)\n\n def remove_matcher(self, matcher):\n if matcher in self._matchers:\n 
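`cropImg` below turns a linear sprite index into pixel offsets with `%` and `//` against the sheet's tiles-per-row. The same mapping in isolation, for reference (sheet width in pixels is an assumed input):

```python
def sprite_origin(index, sheet_width, tile=(16, 16)):
    """Top-left pixel of the index-th tile on a left-to-right, top-to-bottom sheet."""
    per_row = sheet_width // tile[0]
    row, col = divmod(index, per_row)
    return col * tile[0], row * tile[1]

print(sprite_origin(5, 64))   # (16, 16): sixth 16x16 tile on a 64px-wide sheet
```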
self._matchers.remove(matcher)\n\n\n@pytest.fixture(scope='function')\ndef mocker():\n with requests_mock.Mocker(\n adapter=ContextAdapter(case_sensitive=True)\n ) as mocker_ins:\n yield mocker_ins\n","sub_path":"pyvo/auth/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"400059839","text":"from flask import Flask, render_template, request, redirect, session, flash\nfrom mysqlconnection import connectToMySQL\nfrom datetime import datetime\nimport re\n\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$')\napp = Flask(__name__)\napp.secret_key=\"key\" \n\n@app.route('/')\ndef home():\n\tif \"email\" not in session:\n\t\tsession[\"email\"]=\"\"\n\treturn render_template('index.html')\n\n\n@app.route('/valid', methods=['POST'])\ndef validation():\n\tsession['email'] = request.form[\"email\"]\n\tif len(request.form['email']) < 1:\n\t\tflash(\"Email cannot be blank!\", 'email')\n\t\treturn redirect('/')\n\telif not EMAIL_REGEX.match(request.form['email']):\n\t\tflash(\"Email address is not valid\", 'email')\n\t\treturn redirect('/')\n\ttime=datetime.now()\n\tdata={\n\t\t'email':request.form['email'],\n\t\t'created_at': time\n\t\t}\n\tmysql = connectToMySQL('emails')\t\n\tquery= \"SELECT email FROM emails.users where email=%(email)s;\"\n\tsame=mysql.query_db(query,data)\n\tprint(same)\n\tif len(same) > 0:\n\t\tflash(\"Email address is already in use. Please use a different email.\", 'email')\n\t\treturn redirect('/')\n\telse:\n\t\tmysql = connectToMySQL('emails')\n\t\tquery=\"INSERT into emails.users (email,created_at) values (%(email)s,%(created_at)s);\"\n\t\tmysql.query_db(query, data)\n\t\treturn redirect('/success')\n\tprint(same)\n\t\n\n@app.route('/success')\ndef success():\n\temadd = session['email']\n\tmysql = connectToMySQL('emails')\t\n\tquery= \"SELECT email, created_at from emails.users\"\n\temail=mysql.query_db(query)\n\treturn render_template('success.html' , emails=email, emadd=emadd)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"Python_Stack/flask/flask_mysql/email_validation_w_DB/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"300991438","text":"# -*- coding: utf-8 -*-\n# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)\n# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016\n\nimport pandas as pd\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom tsfresh.feature_selection.feature_selector import check_fs_sig_bh\n\n\nclass FeatureSelector(BaseEstimator, TransformerMixin):\n \"\"\"\n Sklearn-compatible estimator, for reducing the number of features in a dataset to only those,\n that are relevant and significant to a given target. It is basically a wrapper around\n :func:`~tsfresh.feature_selection.feature_selector.check_fs_sig_bh`.\n\n The check is done by testing the hypothesis\n\n :math:`H_0` = the Feature is not relevant and can not be added`\n\n against\n\n :math:`H_1` = the Feature is relevant and should be kept\n\n using several statistical tests (depending on whether the feature or/and the target is binary\n or not). 
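A usage sketch for the `ContextAdapter` above: mounted on a `requests` session, each `register_uri` call is live only inside its `with` block and is removed on exit (the `mock://` scheme follows the requests_mock documentation; the URL and payload are illustrative):

```python
import requests

adapter = ContextAdapter(case_sensitive=True)
session = requests.Session()
session.mount("mock://", adapter)

with adapter.register_uri("GET", "mock://example.test/api", json={"ok": True}):
    assert session.get("mock://example.test/api").json() == {"ok": True}
# Outside the block the matcher is gone; the same GET now raises NoMockAddress.
```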
Using the Benjamini Hochberg procedure, only features in :math:`H_0` are rejected.\n\n You can control how the significance tests are executed by handing in a settings object. Please refer to\n :class:`~tsfresh.feature_selection.settings.FeatureSignificanceTestsSettings` for more information.\n If you do not pass a settings object, the defaults are used.\n\n This estimator - as most of the sklearn estimators - works in a two step procedure. First, it is fitted\n on training data, where the target is known:\n\n >>> X_train, y_train = pd.DataFrame(), pd.Series() # fill in with your features and target\n >>> from tsfresh.transformers import FeatureSelector\n >>> selector = FeatureSelector()\n >>> selector.fit(X_train, y_train)\n\n The estimator keeps track on those features, that were relevant in the training step. If you\n apply the estimator after the training, it will delete all other features in the testing\n data sample:\n\n >>> X_test = pd.DataFrame()\n >>> X_selected = selector.transform(X_test)\n\n After that, X_selected will only contain the features that were relevant during the training.\n\n If you are interested in more information on the features, you can look into the member\n ``relevant_features`` after the fit.\n \"\"\"\n def __init__(self, settings=None):\n \"\"\"\n Create a new FeatureSelector instance.\n\n :param settings: The settings to use for feature selection.\n :type settings: tsfresh.feature_selection.settings.FeatureSelectionSettings\n \"\"\"\n self.settings = settings\n self.relevant_features = None\n\n def fit(self, X, y):\n \"\"\"\n Extract the information, which of the features are relevent using the given target.\n\n For more information, please see the :func:`~tsfresh.festure_selection.festure_selector.check_fs_sig_bh`\n function. All columns in the input data sample are treated as feature. 
The index of all\n rows in X must be present in y.\n\n :param X: data sample with the features, which will be classified as relevent or not\n :type X: pandas.DataFrame or numpy.array\n\n :param y: target vecotr to be used, to classify the features\n :type y: pandas.Series or numpy.array\n\n :return: the fitted estimator with the information, which features are relevant\n :rtype: FeatureSelector\n \"\"\"\n if not isinstance(X, pd.DataFrame):\n X = pd.DataFrame(X.copy())\n\n if not isinstance(y, pd.Series):\n y = pd.Series(y.copy())\n\n df_bh = check_fs_sig_bh(X, y, self.settings)\n self.relevant_features = df_bh.loc[df_bh.rejected].Feature\n\n return self\n\n def transform(self, X):\n \"\"\"\n Delete all features, which were not relevant in the fit phase.\n\n :param X: data sample with all features, which will be reduced to only those that are relevant\n :type X: pandas.DataSeries or numpy.array\n\n :return: same data sample as X, but with only the relevant features\n :rtype: pandas.DataFrame or numpy.array\n \"\"\"\n if self.relevant_features is None:\n raise RuntimeError(\"You have to call fit before.\")\n\n if isinstance(X, pd.DataFrame):\n return X.copy().loc[:, self.relevant_features]\n else:\n return X[:, self.relevant_features.index]\n","sub_path":"tsfresh/transformers/feature_selector.py","file_name":"feature_selector.py","file_ext":"py","file_size_in_byte":4351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"281153120","text":"# Library imports\nfrom sys import exit as sexit\nfrom os import path, remove, listdir, replace\nfrom time import sleep\nfrom shutil import copy, rmtree, move\nfrom queue import Queue\nfrom threading import Event\nimport logging as log\n\n# Local imports\nfrom NetCaller.pixiv_handler import pixiv_download, pixiv_fetch_illustration\nfrom NetCaller.booru_handler import booru_download, booru_fetch_illustration, booru_fetch_siblings, make_subdill\nfrom ImageHandling.DIllustration import DIllustration\nimport global_variables as gv\n\nclass Sourcery():\n \"\"\"This is where the magic happens.\"\"\"\n def __init__(self):\n \n self.currently_sourcing = \"-\"\n self.sourced_count = 0\n self.stopped = False\n self.error_q = Queue()\n self.img_data_q = Queue()\n self.sigkill = Event()\n self.cleanup = Event()\n\n def die(self, message):\n self.error_q.put('[Sourcery] ' + message)\n self.currently_sourcing = 'Stopped'\n self.stopped = True\n gv.Startpage_Class.Options_Class.lock_critical('enabled')\n sexit()\n\n def do_sourcery_Thread(self):\n self.sourced_count = 0\n minsim = gv.config.get('SauceNAO','minsim')\n \n copied_imgs_set = set()\n gv.Startpage_Class.input_lock.acquire()\n self.error_q.put('[Sourcery] Copying input images into working directory')\n #Every input image gets checked if it is already sourced and copied into the working directory\n for image_path in gv.Startpage_Class.input_images_set:\n image_name = path.basename(image_path)\n # If an ImageData instance with the same original name, minsim and rename options already exists, skip\n # TODO check if this is the same (rename_pixiv': gv.config['Pixiv']['rename'], 'rename_danbooru': gv.config['Danbooru']['rename']}) yandere konachan rename\n is_dup = False\n gv.img_data_lock.acquire()\n for data in gv.img_data_array: # {'img_name': img, 'minsim': minsim, 'rename_pixiv': gv.config['Pixiv']['rename'], 'rename_danbooru': gv.config['Danbooru']['rename']}\n if image_name == data.original_PID.sub_dill.name and str(minsim) == 
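The selector above delegates the actual multiple-testing control to `check_fs_sig_bh`. For orientation, a hedged standalone sketch of the Benjamini-Hochberg step itself: reject the k smallest p-values, where k is the largest rank whose sorted p-value sits under `rank/m * alpha` (this is the textbook procedure, not tsfresh's exact code):

```python
import numpy as np

def benjamini_hochberg(p_values, alpha=0.05):
    p = np.asarray(p_values, dtype=float)
    order = np.argsort(p)
    m = len(p)
    below = p[order] <= alpha * np.arange(1, m + 1) / m
    k = int(np.max(np.nonzero(below)[0])) + 1 if below.any() else 0
    rejected = np.zeros(m, dtype=bool)
    rejected[order[:k]] = True        # reject the hypotheses with the k smallest p-values
    return rejected

print(benjamini_hochberg([0.001, 0.008, 0.04, 0.2]))  # [ True  True False False]
```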
str(data.original_PID.sub_dill.minsim):\n is_dup = True\n break\n gv.img_data_lock.release()\n if is_dup:\n self.error_q.put('[Sourcery] Image has already been sourced')\n continue\n try:\n copy(image_path, gv.cwd + '/Sourcery/sourced_original')\n except Exception as e:\n self.die(str(e))\n copied_imgs_set.add((image_name, image_path))\n\n gv.Startpage_Class.input_lock.release()\n unsuccessful = 0\n successful = 0\n # For every input image a request goes out to saucenao and gets decoded\n for image_name, image_path in copied_imgs_set:\n if len(image_name) > 24:\n short_name = image_name[:10] + '...' + image_name[-14:]\n else:\n short_name = image_name \n self.currently_sourcing = short_name\n self.error_q.put('[Sourcery] Sourcing: ' + short_name)\n res = gv.SauceNAOCaller.get_response(image_name, minsim)\n if res[0] == 401:\n # Exception while opening image!\n self.error_q.put('[Sourcery] ' + res[1])\n unsuccessful += 1\n continue\n elif res[0] == 403:\n # Incorrect or Invalid API Key!\n self.die(res[1])\n elif res[0] == 666:\n # Request failed!\n self.die(res[1])\n elif res[0] == 2:\n # generally non 200 statuses are due to either overloaded servers or the user is out of searches\n self.die(res[1] + '\\nSauceNao servers are overloaded\\nor you are out of searches.\\nTry again tomorrow.')\n elif res[0] == 600:\n # One or more indexes are having an issue.\n # This search is considered partially successful, even if all indexes failed, so is still counted against your limit.\n # The error may be transient, but because we don't want to waste searches, allow time for recovery.\n self.die(res[1] + '\\nSauceNao gave a response but there was a problem on their end.\\nStopped further processing of images to give the server time to recover.\\nTry again in a few minutes.')\n elif res[0] == 41:\n # Problem with search as submitted, bad image, or impossible request.\n # Issue is unclear, so don't flood requests.\n if res[3] < 1:\n self.die(res[1] + ' + Out of searches for today')\n else:\n self.error_q.put('[Sourcery] ' + res[1])\n if res[2] < 1:\n sleep(30)\n unsuccessful += 1\n elif res[0] == 402:\n # General issue, api did not respond. 
Normal site took over for this error state.\n # Issue is unclear, so don't flood requests.\n self.error_q.put('[Sourcery] ' + res[1])\n sleep(10)\n unsuccessful += 1\n elif res[0] == 200:\n d_illust = self.process_img_data_new_Thread(image_name, gv.cwd + '/Sourcery/sourced_original/' + image_name, image_path, res, minsim)\n if d_illust != False:\n self.img_data_q.put(d_illust)\n successful += 1\n else:\n unsuccessful += 1\n if gv.SauceNAOCaller.long_remaining == 0:\n self.die('Out of searches for today')\n if gv.SauceNAOCaller.short_remaining <= 0:\n self.error_q.put('[Sourcery] Sleeping 30 seconds because of SauceNao restrictions')\n sleep(30)\n if self.sigkill.is_set():\n self.sigkill.clear()\n self.error_q.put(\"Successful: \" + str(successful) + \"\\nUnsuccessful: \" + str(unsuccessful))\n if gv.SauceNAOCaller.long_remaining == 0:\n self.currently_sourcing = \"Out of requests\"\n else:\n self.currently_sourcing = \"Stopped\"\n self.stopped = True\n gv.Startpage_Class.Options_Class.lock_critical('enabled')\n sexit()\n if self.cleanup.is_set():\n sexit()\n self.sourced_count = unsuccessful + successful\n if gv.SauceNAOCaller.long_remaining == 0:\n self.currently_sourcing = \"Out of requests\"\n else:\n self.currently_sourcing = \"Finished\"\n self.stopped = True\n self.error_q.put(\"Successful: \" + str(successful) + \" Unsuccessful: \" + str(unsuccessful))\n \n gv.Startpage_Class.Options_Class.lock_critical('enabled')\n\n def process_img_data_new_Thread(self, img_name_original, img_path, input_path, res, minsim):\n \"\"\"\n Downloads the image from pixiv and Danbooru\n Returns information on the downloads\n \"\"\"\n # dict_list is list of dicts of this format: {\"service_name\": service_name, \"illust_id\": illust_id, \"source\": source}\n dict_list = gv.SauceNAOCaller.decode_response(res[1])\n\n illustrations = {key.capitalize(): list() for key in gv.services}\n visited = {key.capitalize(): list() for key in gv.services}\n\n for source in dict_list:\n if source['illust_id'] != 0:\n self.error_q.put('[Sourcery] Fetching ' + source['service_name'] + ' illustration')\n for s in gv.services:\n if s == \"pixiv\":\n illustrations[\"Pixiv\"].extend(self.pixiv_fetcher_Thread(source, visited[\"Pixiv\"]))\n else:\n s = s.capitalize()\n temp = self.booru_fetcher_Thread(source, s, visited[s])\n illustrations[s].extend(temp)\n if (sum([len(x) for x in illustrations.values()]) <= 0):\n self.error_q.put('[Sourcery] No sources were found!')\n try:\n if gv.config.getboolean('Sourcery', 'autosave_original'):\n save_path = path.join(gv.config.get('Sourcery', 'output_dir_ns'), img_name_original)\n if not path.isdir(save_path):\n move(img_path, save_path)\n else:\n for file in listdir(img_path):\n replace(path.join(img_path, file), path.join(save_path, file))\n rmtree(img_path)\n if gv.config.getboolean('Sourcery', 'delete_input'):\n if path.isdir(input_path):\n rmtree(input_path)\n elif path.isfile(input_path):\n remove(input_path)\n else:\n if path.isdir(img_path):\n rmtree(img_path)\n elif path.isfile(img_path):\n remove(img_path)\n except Exception as e:\n print('ERROR [0067] ' + str(e))\n gv.Logger.write_to_log(\"ERROR [0067] \" + str(e), log.ERROR)\n gv.Files.Ref.new_reference(img_name_original, [], [], [], [], [], gv.config['Pixiv']['rename'], gv.config['Danbooru']['rename'], gv.config['Yandere']['rename'], gv.config['Konachan']['rename'], gv.config['Konachan']['rename'], minsim, dict_list, input_path)\n return False\n\n self.error_q.put('[Sourcery] Downloaded illustrations successfully')\n\n refs = 
{key.capitalize(): list() for key in gv.services}\n\n        for serv, ills in illustrations.items():\n            for illust in ills:\n                refs[serv].append((illust.name, illust.id))\n\n        new_reference = (img_name_original, refs[\"Pixiv\"], refs[\"Danbooru\"], refs[\"Yandere\"], refs[\"Konachan\"], refs[\"Gelbooru\"], gv.config['Pixiv']['rename'], gv.config['Danbooru']['rename'], gv.config['Yandere']['rename'], gv.config['Konachan']['rename'], gv.config['Gelbooru']['rename'], minsim, dict_list, input_path)\n        ref = gv.Files.Ref.new_reference(*new_reference)\n\n        original = {\"name\":img_name_original, \"work_path\":gv.cwd + '/Sourcery/sourced_original/' + img_name_original} \n        d_illust = DIllustration(input_path, original, illustrations, ref, minsim)\n\n        return d_illust\n\n    def pixiv_fetcher_Thread(self, source, visited):\n        illustration_list = list()\n        name = False\n        if source['service_name'] == 'Pixiv':\n            if source['illust_id'] not in visited:\n                sdillust, found = pixiv_fetch_illustration(source['illust_id'], self.error_q)\n                if sdillust != False and not found:\n                    name = pixiv_download(sdillust, self.error_q)\n                    if name != False:\n                        sdillust.init_post_download(name, source)\n                        if sdillust.validity_check():\n                            illustration_list.append(sdillust)\n                            visited.append(source['illust_id'])\n                elif found:\n                    illustration_list.append(sdillust)\n                    visited.append(source['illust_id'])\n        return illustration_list\n\n    def booru_fetcher_Thread(self, source, service, visited):\n        illustration_list = list()\n        def down_illust(ssdillust, found):\n            name = False\n            if ssdillust != False and not found:\n                name = booru_download(ssdillust, self.error_q)\n                if name != False:\n                    ssdillust.init_post_download(name, source)\n                    if ssdillust.validity_check():\n                        visited.append(ssdillust.id)\n                        return ssdillust\n            elif found:\n                visited.append(ssdillust.id)\n                return ssdillust\n            else:\n                return None\n        if source['service_name'] == service and source['illust_id'] not in visited:\n            ssdillust, found = booru_fetch_illustration(source['illust_id'], service, self.error_q)\n            sdillust = down_illust(ssdillust, found)\n            if sdillust != None:\n                illustration_list.append(sdillust)\n                if sdillust.parent_id != None:\n                    ills = booru_fetch_siblings(sdillust.parent_id, service, self.error_q)\n                    for ill in ills:\n                        if int(ill['id']) in visited:\n                            continue\n                        ssdillust, found = make_subdill(service, ill, int(ill['id']))\n                        sill = down_illust(ssdillust, found)\n                        if sill != None:\n                            illustration_list.append(sill)\n        \n        return illustration_list\n","sub_path":"code/Work/sourcery.py","file_name":"sourcery.py","file_ext":"py","file_size_in_byte":12929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"383396462","text":"\"\"\"Implement mpldatacursor's \"point labels\" using event handlers.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport mplcursors\nimport numpy as np\n\nlabels = ['a', 'b', 'c', 'd', 'e', 'f']\nx = np.array([0, 0.05, 1, 2, 3, 4])\n\n# All points on this figure will have point labels.\nfig, ax = plt.subplots()\nline, = ax.plot(x, x, 'ro')\nax.margins(0.1)\n\nmplcursors.cursor(ax).connect(\n    \"add\", lambda sel: sel.annotation.set_text(labels[sel.target.index]))\n\nplt.show()\n","sub_path":"examples/labeled_points.py","file_name":"labeled_points.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"57828629","text":"from PIL import Image\nim = Image.open(\"level7.png\")\ncenter=im.size[1]/2 # half-height value\np = im.load() # list of (R,G,B,A), 
which can be accessed by p[x,y]\nres = [chr(p[4,center][1])] + [chr(p[5+7*i,center][1]) for i in range(90)] # Grayscale colors from white to black have the same RGB values, that means R=G=B for every block. Please note that the first block is only 5 pixels wide, whereas the other blocks are 7 pixels wide.\nprint(''.join(res)) \n\n#the commented part below is the actual solution\n#print(chr(105)+chr(110)+chr(116)+chr(101)+chr(103)+chr(114)+chr(105)+chr(116)+chr(121)) \n\n","sub_path":"python challenge lvl 7.py","file_name":"python challenge lvl 7.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"631448427","text":"'''This program will create a user defined corpora from a text file, a word file and a pdf file'''\n\nimport read_word,read_pdf\nimport os\nfrom nltk.corpus.reader.plaintext import PlaintextCorpusReader\n\n# Read text from text file\ndef get_text_from_file (text_file_name):\n    file = open(text_file_name, 'r')\n    return file.read()\n\n# Create a directory to hold corpus\nnewCorpusDir = 'mycorpus/'\nif not os.path.isdir(newCorpusDir):\n    os.mkdir(newCorpusDir)\n\n# Read files word, pdf and text one by one\ntext_file = get_text_from_file('./samples/sample_feed.txt')\ntext_word = read_word.get_text_from_word('./samples/sample_docx_1page.docx')\ntext_pdf = read_pdf.get_text_from_pdf('./samples/sample_pdf_file.pdf')\n\n# Write the text in corpus read from the above files\ntext = [text_file, text_word, text_pdf]\nfor idx, f in enumerate(text):\n    with open(newCorpusDir + str(idx) + '.txt', 'w') as fout:\n        fout.write(f)\n\n# Create plain text corpus object from newCorpusDir\nnewCorpus = PlaintextCorpusReader(newCorpusDir, '.*')\n\n# Check the words in corpus\nprint(newCorpus.words())\n\n# Check the sentences in file 1.txt\nprint(newCorpus.sents(newCorpus.fileids()[1]))\n\n# Check the paragraphs in file 0.txt\nprint(newCorpus.paras(newCorpus.fileids()[0]))","sub_path":"nlp_with_python_cookbook/practice/user_defined_corpus/user_corpora.py","file_name":"user_corpora.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"90338651","text":"import json\nimport PyMVCS\nimport PyArchive\nfrom PyQt5.QtCore import QFileInfo\nfrom model.util import Util\n\n\nclass RingPhotography360MakerStatus(PyMVCS.Status):\n    def __init__(self):\n        super(RingPhotography360MakerStatus, self).__init__()\n        self.binary = {}\n        self.description = ''\n        self.title = ''\n        self.active = ''\n        self.preloads = [{'type': 'Sprite', 'name': 'img', 'count': 0, 'suffix': '.jpg'}]\n\n\nclass RingPhotography360MakerModel(PyMVCS.Model):\n    NAME = 'RingPhotography360MakerModel'\n\n    def _setup(self):\n        self.logger.Trace('RingPhotography360MakerModel.setup')\n        self.status = RingPhotography360MakerStatus()\n\n    def _dismantle(self):\n        self.logger.Trace('RingPhotography360MakerModel.dismantle')\n\n    def clean(self):\n        self.status.binary = {}\n        self.active = ''\n        # Broadcast to the view layer to clear the UI\n        self.Broadcast('/maker/RingPhotography360/clean', None)\n\n    def saveAddPhotos(self, _files):\n        for file in _files:\n            filename = QFileInfo(file).fileName()\n            # Skip files that already exist\n            if filename in self.status.binary:\n                continue\n            # Cache the binary data\n            f = open(file, 'rb')\n            self.status.binary[filename] = f.read()\n            f.close()\n        # Update the UI to refresh the image list\n        self.Broadcast('/maker/RingPhotography360/update', None)\n\n    def updateActivateFile(self, _file):\n        self.logger.Trace(f'activate file: {_file}')\n        self.status.active = _file\n        
self.Broadcast('/maker/RingPhotography360/file/activated', _file)\n\n    def saveTitle(self, _value):\n        self.title = _value\n\n    def saveDescription(self, _value):\n        self.description = _value\n\n    def saveExport(self, _outFile):\n        self.status.preloads[0]['count'] = len(self.status.binary)\n        writer = PyArchive.FileWriter()\n        writer.Open(_outFile, True)\n        # Write the files into the archive\n        idx = 0\n        for filename in self.status.binary.keys():\n            idx += 1\n            writer.Write('img#{0}.jpg'.format(idx), self.status.binary[filename])\n        writer.Write(\"app.asset\", Util.fileToBytes('./template/RingPhotography360/app.asset'))\n        writer.Write(\"app.lua\", Util.fileToBytes('./template/RingPhotography360/app.lua'))\n        writer.Write(\"config.lua\", self.renderConfig())\n        writer.Write(\"preloads.json\", Util.stringToBytes(json.dumps(self.status.preloads)))\n        writer.Close()\n\n    def saveImport(self, _inFile):\n        self.clean()\n        reader = PyArchive.FileReader()\n        reader.Open(_inFile)\n        for filename in reader.ListEntries():\n            self.status.binary[filename] = reader.Read(filename)\n        reader.Close()\n        self.Broadcast('/maker/RingPhotography360/update', None)\n\n    def renderConfig(self):\n        lines = ''\n        f = open('./template/RingPhotography360/config.lua', 'r')\n        scripts = f.read()\n        f.close()\n        # Substitute the template variables\n        scripts = scripts.replace(\"{{__count__}}\", \"{0}\".format(len(self.status.binary)))\n        scripts = scripts.replace(\"{{__description__}}\", self.description)\n        scripts = scripts.replace(\"{{__title__}}\", self.title)\n        # Convert to Python bytes\n        return Util.stringToBytes(scripts)\n","sub_path":"code/src/model/maker/RingPhotography360.py","file_name":"RingPhotography360.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"328731539","text":"#! /usr/bin/env python\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport matplotlib.dates as mdates\nimport os\nimport numpy as np\nimport functions.common as cf\n\n\ndef format_date_axis(axis, figure):\n    df = mdates.DateFormatter('%Y-%m-%d')\n    axis.xaxis.set_major_formatter(df)\n    figure.autofmt_xdate()\n\n\ndef plot_profiles(x, y, colors, stdev=None):\n    \"\"\"\n    Create a profile plot for mobile instruments\n    :param x: .nc data array containing data for plotting variable of interest (e.g. density)\n    :param y: .nc data array containing data for plotting on the y-axis (e.g. pressure)\n    :param colors: list of colors to be used for plotting\n    :param stdev: desired standard deviation to exclude from plotting\n    \"\"\"\n    if stdev is None:\n        xD = x.data\n        yD = y.data\n        leg_text = ()\n    else:\n        ind = cf.reject_extreme_values(x.data)\n        xdata = x[ind]\n        ydata = y[ind]\n        \n        ind2 = cf.reject_outliers(xdata.data, stdev)\n        xD = xdata[ind2].data\n        yD = ydata[ind2].data\n        outliers = str(len(x) - len(xD))\n        leg_text = ('removed {} outliers (SD={})'.format(outliers, stdev),)\n\n    fig, ax = plt.subplots()\n    plt.grid()\n    ax.scatter(xD, yD, c=colors, s=2, edgecolor='None')\n    ax.invert_yaxis()\n    ax.set_xlabel((x.name + \" (\" + x.units + \")\"), fontsize=9)\n    ax.set_ylabel((y.name + \" (\" + y.units + \")\"), fontsize=9)\n    ax.legend(leg_text, loc='best', fontsize=6)\n    return fig, ax\n\n\ndef plot_timeseries(x, y, stdev=None):\n    \"\"\"\n    Create a simple timeseries plot\n    :param x: array containing data for x-axis (e.g. 
time)\n :param y: .nc data array for plotting on the y-axis, including data values, coordinates, and variable attributes\n :param stdev: desired standard deviation to exclude from plotting\n \"\"\"\n if stdev is None:\n xD = x\n yD = y.data\n leg_text = ()\n else:\n ind = cf.reject_extreme_values(y.data)\n ydata = y[ind]\n xdata = x[ind]\n\n ind2 = cf.reject_outliers(ydata.data, stdev)\n yD = ydata[ind2].data\n xD = xdata[ind2]\n outliers = str(len(y) - len(yD))\n leg_text = ('removed {} outliers (SD={})'.format(outliers, stdev),)\n\n fig, ax = plt.subplots()\n plt.grid()\n plt.plot(xD, yD, '.', markersize=2)\n\n try:\n y_units = y.units\n except AttributeError:\n y_units = 'no_units'\n\n ax.set_ylabel((y.name + \" (\" + y_units + \")\"), fontsize=9)\n format_date_axis(ax, fig)\n y_axis_disable_offset(ax)\n ax.legend(leg_text, loc='best', fontsize=6)\n return fig, ax\n\n\ndef plot_timeseries_compare(t0, t1, var0, var1, m0, m1, long_name, stdev=None):\n \"\"\"\n Create a timeseries plot containing two datasets\n :param t0: data array of time for dataset 0\n :param t1: data array of time for dataset 1\n :param var0: .nc data array for plotting on the y-axis for dataset 0, including data values and variable attributes\n :param var1: .nc data array for plotting on the y-axis for dataset 1, including data values and variable attributes\n :param stdev: desired standard deviation to exclude from plotting\n \"\"\"\n if stdev is None:\n t0_data = t0.data\n var0_data = var0.data\n leg_text = ('{}'.format(m0),)\n t1_data = t1.data\n var1_data = var1.data\n leg_text += ('{}'.format(m1),)\n else:\n ind0 = cf.reject_extreme_values(var0.data)\n t0i = t0[ind0]\n var0i = var0[ind0]\n\n ind02 = cf.reject_outliers(var0i.data, stdev)\n t0_data = t0i[ind02].data\n var0_data = var0i[ind02].data\n var0_data[var0_data <= 0.0] = np.nan # get rid of zeros and negative numbers\n outliers0 = str((len(var0) - len(var0_data)) + (len(t0_data) - np.count_nonzero(~np.isnan(var0_data))))\n leg_text = ('{}: removed {} outliers (SD={})'.format(m0, outliers0, stdev),)\n\n ind1 = cf.reject_extreme_values(var1.data)\n t1i = t1[ind1]\n var1i = var1[ind1]\n\n ind12 = cf.reject_outliers(var1i.data, stdev)\n t1_data = t1i[ind12].data\n var1_data = var1i[ind12].data\n var1_data[var1_data <= 0.0] = np.nan # get rid of zeros and negative numbers\n outliers1 = str((len(var1) - len(var1_data)) + (len(t1_data) - np.count_nonzero(~np.isnan(var1_data))))\n leg_text += ('{}: removed {} outliers (SD={})'.format(m1, outliers1, stdev),)\n\n try:\n y_units = var0.units\n except AttributeError:\n y_units = 'no_units'\n\n fig, ax = plt.subplots()\n plt.grid()\n #plt.ylim([2000, 2500])\n\n ax.plot(t0_data, var0_data, 'o', markerfacecolor='none', markeredgecolor='r', markersize=5, lw=.75)\n ax.plot(t1_data, var1_data, 'x', markeredgecolor='b', markersize=5, lw=.75)\n ax.set_ylabel((long_name + \" (\" + y_units + \")\"), fontsize=9)\n format_date_axis(ax, fig)\n y_axis_disable_offset(ax)\n ax.legend(leg_text, loc='best', fontsize=6)\n return fig, ax\n\n\ndef plot_timeseries_panel(ds, x, vars, colors, stdev=None):\n \"\"\"\n Create a timeseries plot with horizontal panels of each science parameter\n :param ds: dataset (e.g. .nc file opened with xarray) containing data for plotting\n :param x: array containing data for x-axis (e.g. 
time)\n :param vars: list of science variables to plot\n :param colors: list of colors to be used for plotting\n :param stdev: desired standard deviation to exclude from plotting\n \"\"\"\n fig, ax = plt.subplots(len(vars), sharex=True)\n\n for i in range(len(vars)):\n y = ds[vars[i]]\n\n if stdev is None:\n yD = y.data\n xD = x\n leg_text = ()\n else:\n ind = cf.reject_extreme_values(y.data)\n ydata = y[ind]\n xdata = x[ind]\n\n ind2 = cf.reject_outliers(ydata.data, stdev)\n yD = ydata[ind2].data\n xD = xdata[ind2]\n outliers = str(len(y) - len(yD))\n leg_text = ('{}: rm {} outliers'.format(y.name, outliers),)\n\n c = colors[i]\n ax[i].plot(xD, yD, '.', markersize=2, color=c)\n ax[i].set_ylabel(('(' + y.units + ')'), fontsize=5)\n ax[i].tick_params(axis='y', labelsize=6)\n ax[i].legend(leg_text, loc='best', fontsize=4)\n y_axis_disable_offset(ax[i])\n if i == len(vars) - 1: # if the last variable has been plotted\n format_date_axis(ax[i], fig)\n return fig, ax\n\n\ndef plot_xsection(subsite, x, y, z, stdev=None):\n \"\"\"\n Create a cross-section plot for mobile instruments\n :param subsite: subsite part of reference designator to plot\n :param x: array containing data for x-axis (e.g. time)\n :param y: .nc data array containing data for plotting on the y-axis (e.g. pressure)\n :param z: .nc data array containing data for plotting variable of interest (e.g. density)\n :param stdev: desired standard deviation to exclude from plotting\n \"\"\"\n z_data = z.data\n # when plotting gliders, remove zeros (glider fill values) and negative numbers\n if 'MOAS' in subsite:\n z_data[z_data <= 0.0] = np.nan\n zeros = str(len(z) - np.count_nonzero(~np.isnan(z_data)))\n\n if stdev is None:\n xD = x\n yD = y.data\n zD = z_data\n else:\n ind = cf.reject_extreme_values(z_data)\n xdata = x[ind]\n ydata = y[ind]\n zdata = z_data[ind]\n \n ind2 = cf.reject_outliers(zdata, stdev)\n xD = xdata[ind2]\n yD = ydata[ind2].data\n zD = zdata[ind2]\n outliers = str(len(z_data) - len(zD))\n\n try:\n zeros\n except NameError:\n zeros = None\n\n try:\n outliers\n except NameError:\n outliers = None\n\n fig, ax = plt.subplots()\n plt.margins(y=.08, x=.02)\n xc = ax.scatter(xD, yD, c=zD, s=2, edgecolor='None')\n ax.invert_yaxis()\n\n # add colorbar\n bar = fig.colorbar(xc, ax=ax, label=(z.name + \" (\" + z.units + \")\"))\n bar\n bar.formatter.set_useOffset(False)\n\n ax.set_ylabel((y.name + \" (\" + y.units + \")\"), fontsize=9)\n format_date_axis(ax, fig)\n\n if zeros is None and type(outliers) is str:\n leg = ('rm: {} outliers (SD={})'.format(outliers, stdev),)\n ax.legend(leg, loc=1, fontsize=6)\n if type(zeros) is str and outliers is None:\n leg = ('rm: {} values <=0.0'.format(zeros),)\n ax.legend(leg, loc=1, fontsize=6)\n if type(zeros) is str and type(outliers) is str:\n leg = ('rm: {} values <=0.0, rm: {} outliers (SD={})'.format(zeros, outliers, stdev),)\n ax.legend(leg, loc=1, fontsize=6)\n return fig, ax\n\n\ndef pressure_var(dataset, vars):\n \"\"\"\n Return the pressure (dbar) variable in a dataset.\n :param vars: list of all variables in a dataset\n \"\"\"\n pressure_variables = ['int_ctd_pressure', 'seawater_pressure', 'ctdpf_ckl_seawater_pressure', 'sci_water_pressure_dbar',\n 'ctdbp_seawater_pressure', 'ctdmo_seawater_pressure', 'ctdbp_no_seawater_pressure',\n 'sci_water_pressure_dbar', 'pressure_depth', 'abs_seafloor_pressure', 'presf_tide_pressure',\n 'presf_wave_burst_pressure', 'pressure', 'velpt_pressure', 'ctd_dbar', 'vel3d_k_pressure',\n 'seafloor_pressure', 'pressure_mbar']\n pvariables = 
list(set(pressure_variables).intersection(vars))\n    pvars = []\n    for press_var in pvariables:\n        if press_var == 'int_ctd_pressure':\n            pvars.append(str(press_var))\n        else:\n            try:\n                units = dataset[press_var].units\n                if units in ['dbar', '0.001 dbar']:\n                    pvars.append(str(press_var))\n            except AttributeError:\n                continue\n\n    pvar = None\n    if len(pvars) > 1:\n        print('More than 1 pressure variable found in the file')\n    elif len(pvars) == 0:\n        print('No pressure variable found in the file')\n    else:\n        pvar = str(pvars[0])\n    return pvar\n\n\ndef save_fig(save_dir, file_name, res=150):\n    # save figure to a directory with a resolution of 150 DPI\n    save_file = os.path.join(save_dir, file_name)\n    plt.savefig(str(save_file), dpi=res)\n    plt.close()\n\n\ndef y_axis_disable_offset(axis):\n    # format y-axis to disable offset\n    y_formatter = ticker.ScalarFormatter(useOffset=False)\n    axis.yaxis.set_major_formatter(y_formatter)\n","sub_path":"functions/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":10293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"368854440","text":"# Axc Perfume -- Sophia Xia, Ryan Aday, Raunak Chowdhury\n# Softdev2 pd8\n# K#08: Ay Mon, Go Git It From Yer Flask\n# 2019-03-08\n\nimport os\n\nfrom flask import Flask, render_template, request, flash\n\nfrom mango import *\n\napp = Flask(__name__)\napp.secret_key = os.urandom(32)\n\n@app.route('/', methods = ['GET', 'POST'])\ndef home():\n    vars = {}\n    if request.method == 'POST':\n        ip = request.form['ip']\n        # print('IP: \"{}\"'.format(ip))\n        if ip:\n            setup(ip)\n            id = request.form['id']\n            data = find_pokemon_by_id(int(id))\n            if len(data) != 0:\n                data = data[0]\n                vars['pokemon_img'] = data['img']\n                vars['id'] = data['id']\n                vars['name'] = data['name']\n                vars['type'] = data['type']\n                vars['weaknesses'] = data['weaknesses']\n                vars['data'] = True\n            else:\n                flash('Not a valid id!')\n    return render_template('form.html', **vars)\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"08_mongosite/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"322504600","text":"import cv2\nimport numpy as np\n\nfrom bright.bright_function import edge_enhancement\nimport num_detect.number_geom as geom\n\ndatafolder = '/Users/soua/Desktop/Project/sterling_demo2'\nimgpath = datafolder + '/Img_580.jpg'\n\nimg = cv2.imread(imgpath)\nimg = edge_enhancement(img)\ncv2.imshow('origin', img)\n\nalpha = 1.5\nbeta = 50\nres = cv2.convertScaleAbs(img, alpha=alpha, beta=beta)\ncv2.imshow('convert scaling', res)\n\nhsv = cv2.cvtColor(res, cv2.COLOR_BGR2HSV)\ncv2.imshow('hsv', hsv)\n\nh, s, v = cv2.split(hsv)\nprint('v max : %f, min : %f, mean : %f'%(np.amax(v[:]), np.amin(v[:]), np.mean(v[:])))\nprint('s max : %f, min : %f, mean : %f'%(np.amax(s[:]), np.amin(s[:]), np.mean(s[:])))\ns = s + 50\nhsv = cv2.merge((h, s, v))\ncv2.imshow('alpha beta hsv', hsv)\n# cv2.waitKey(0)\n\nboundaries = [([0, 0, 255], [20, 100, 255])] # RED\nlowerR = np.array(boundaries[0][0], dtype='uint8')\nupperR = np.array(boundaries[0][1], dtype='uint8')\n\nmask = cv2.inRange(hsv, lowerR, upperR)\ncv2.imshow('mask', mask)\n# cv2.waitKey(0)\ncnts, max_cont, approx_cnt = geom.find_main_contour_approx(mask)\ncv2.drawContours(img, max_cont, -1, (255, 0, 0), 2)\ncv2.drawContours(img, approx_cnt, -1, (0, 255, 0), 2)\ncv2.imshow('contours', img)\ncv2.waitKey(0)\n\n# shape, thresh = detect(max_cont, mask)\n# 
print(1)\n# if shape == 'octagon':\n#     cv2.putText(img, \"STOP\", )\n","sub_path":"num_detect/stop_detect.py","file_name":"stop_detect.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"316731097","text":"#\n# 1065_한수.py\n# BaekjoonAlgorithm\n#\n# Created by EonseokYim on 6/8/19, 11:58 PM.\n# Copyright © 2019 EonseokYim. All rights reserved.\n#\n# https://www.acmicpc.net/problem/1065\n#\n\none_number = 0\nnum = int(input())\n\nif 1 <= num < 100:\n    print(num)\n\nelif 100 <= num:\n    is_one_number = False\n\n    i = 100\n    while i <= num:\n        split_num = list(map(int, list(str(i))))\n\n        for j in range(len(split_num)-2):\n            if split_num[j] + split_num[j+2] == 2 * split_num[j+1]:\n                is_one_number = True\n            else:\n                is_one_number = False\n                break\n\n        if is_one_number:\n            one_number += 1\n\n        i += 1\n\n    print(one_number+99)\n\n","sub_path":"일반/1065_한수.py","file_name":"1065_한수.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"398108756","text":"#!/usr/bin/env python3\n\"\"\"My solution to day 10 of Advent of Code\nhttp://adventofcode.com/day/10\n\"\"\"\n\nimport sys\n\ndef look_and_say(phrase):\n    last_number = \"\"\n    count = 0\n    res = \"\"\n    for number in phrase:\n        if number == last_number:\n            count += 1\n        elif last_number == \"\":\n            last_number = number\n            count = 1\n        else:\n            res += str(count) + last_number\n            last_number = number\n            count = 1\n    res += str(count) + last_number\n    return res\n\nif __name__ == '__main__' and len(sys.argv) > 1 and len(sys.argv[1]) > 0:\n    phrase = sys.argv[1]\n    for i in range(40):\n        phrase = look_and_say(phrase)\n    print(\"Length after 40 runs:\", len(phrase))\n\n    phrase = sys.argv[1]\n    for i in range(50):\n        phrase = look_and_say(phrase)\n    print(\"Length after 50 runs:\", len(phrase))\n","sub_path":"day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"194202349","text":"import numpy as np\nfrom adaboost import AdaBoost, AdaBoostTextbook\nfrom utils import Dataset\n\n\ndef test(model, dataset, name):\n    X_test, y_test = dataset.get_dataset()\n    pred = np.array([model.predict(x) for x in X_test])\n    accuracy = (y_test == pred).sum() / y_test.size\n    print(f'{name} version accuracy: {accuracy:.1f}')\n\n\nif __name__ == '__main__':\n    dataset = Dataset('./training-data.txt')\n    test_dataset = Dataset('./testing-data.txt')\n\n    model = AdaBoost(9)\n    model.train(dataset)\n    accuracy = test(model, test_dataset, 'Original')\n\n    model_tb = AdaBoostTextbook(9)\n    model_tb.train(dataset, 0.2, 2)\n    accuracy_tb = test(model_tb, test_dataset, 'Textbook')\n","sub_path":"homework/hw6/hw6.py","file_name":"hw6.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"708345","text":"import networkx as nx\nimport random\n\n\n# Find high-influence nodes based on the RR (reverse-reachable) principle\ndef get_seeds_by_rr(social_network, community_nodes, num, rank):\n    # @params: the community obtained from community detection, the number of randomly selected nodes, the number of nodes to output\n    # Record the potentially high-influence nodes found by the algorithm\n    potential_influential_nodes = {}\n    res = []\n    community_detected = nx.subgraph(social_network, community_nodes)\n    # The input community is an undirected graph; how to turn it into a directed one? 20181224: forcing a direct conversion may not work, because the internal implementation of the community package is unknown\n    # 20181226: considering that the sub_graph method of the community package can be used: given the whole graph and the selected nodes, the graph containing those nodes can be obtained\n    # So all that is needed is to get the nodes contained in the detected community and call that method with those nodes as input to obtain the directed graph of the community\n\n    # Randomly select a certain number of nodes from the given community\n    
random_selected_nodes = get_nodes_random(community_detected, num)\n    # Call the method from the community package to reverse the community's relations, in order to find the reverse-reachable nodes\n    reversed_community = nx.reverse_view(community_detected)\n    # Run the reverse-reachability step on every node: find and record the high-influence nodes that may influence it\n    for current_node in list(random_selected_nodes):\n        # Find the neighbors that can influence the current node\n        current_neighbors = nx.neighbors(reversed_community, current_node)\n        # Record them in a loop\n        for neighbor in list(current_neighbors):\n            if neighbor in potential_influential_nodes:\n                potential_influential_nodes[neighbor] += 1\n            else:\n                potential_influential_nodes[neighbor] = 1\n    # Sort the nodes by how many times they appeared during propagation and output the top-ranked ones\n    sorted_nodes = sorted(potential_influential_nodes.items(), key=lambda d: d[1], reverse=True)\n    if len(sorted_nodes) < rank:\n        rank = len(sorted_nodes)\n    for index in range(rank):\n        res.append(sorted_nodes[index][0])\n    return res\n\n\n# Select potential high-influence nodes via random walk (deprecated; after the improvement, the random-walk-based reverse-reachability algorithm is used to select high-influence nodes)\ndef get_seeds_by_random_walk(whole_coms, com_nodes, num, rank):\n    # @params: the community obtained from community detection, the number of randomly selected nodes, the number of nodes to output\n    # Record the potentially high-influence nodes found by the algorithm\n    potential_influential_nodes = {}\n    res = []\n    whole_step = 15\n    # Get the directed graph of this community\n    com_graph = nx.subgraph(whole_coms, com_nodes)\n    # Randomly select a certain number of nodes from the given community\n    random_selected_nodes = get_nodes_random(com_graph, num)\n    # Core code of the random walk algorithm\n    # Core idea: starting from the randomly selected nodes, jump randomly from one node to another with uniform\n    # probabilities, i.e. every jump to another node is equally likely; no further adjustments are made.\n    # Perform a random walk from each randomly selected node in turn and record how many times each node appears\n    for current_node in list(random_selected_nodes):\n        # Iterate: walk k random steps\n        for i_step in range(whole_step):\n            # Find all neighbor nodes and randomly pick one as the start of the next step\n            # node_neighbor = nx.neighbors(com_graph, current_node)\n            node_neighbor = com_graph.neighbors(current_node)\n            # node_neighbor = com_graph.neighbors('399')\n            list_neighbor = list(node_neighbor).copy()\n            # If there is no neighbor to propagate to, end the random walk early\n            count = len(list_neighbor)\n            if len(list_neighbor) == 0:\n                break\n            \"\"\"\n            # Reason for abandoning this: for some unknown reason, len() returns 0 after applying list() to the generator, so the clumsy approach has to be used\n            if len(list(node_neighbor)) == 0:\n                break\n            \"\"\"\n            random_index = random.randint(0, count - 1)\n            index = 0\n            for neighbor in list_neighbor:\n                if index == random_index:\n                    # Record how many times each node appears during the walk\n                    if neighbor in potential_influential_nodes:\n                        potential_influential_nodes[neighbor] += 1\n                    else:\n                        potential_influential_nodes[neighbor] = 1\n                else:\n                    index += 1\n\n    # Sort the nodes by how many times they appeared during propagation and output the top-ranked ones\n    sorted_nodes = sorted(potential_influential_nodes.items(), key=lambda d: d[1], reverse=True)\n    if len(sorted_nodes) < rank:\n        rank = len(sorted_nodes)\n    for sorted_index in range(rank):\n        res.append(sorted_nodes[sorted_index][0])\n    return res\n\n\n# Use the improved random-walk-based reverse-reachability algorithm to select potential high-influence nodes\ndef get_seeds_by_rsrw(g, com_nodes, ini_num, rank, step=200, whole_iters=100):\n    '''\n    Select candidate nodes according to the given parameters and add them to the candidate pool\n    :param g: the whole social network\n    :param com_nodes: the current community\n    :param ini_num: number of initially randomly selected nodes\n    :param rank: number of nodes finally selected\n    :param step: number of random walk steps\n    :param whole_iters: number of simulation iterations\n    :return:\n    '''\n    pot_nodes = {}\n    res = []\n    # Get the directed graph of the community from its nodes\n    community_directed_graph = nx.subgraph(g, com_nodes)\n    # Get the reversed graph from the original directed graph\n    community_reversed_graph = nx.reverse_view(community_directed_graph)\n    for iteration in range(whole_iters):\n        # Randomly select some nodes from the community\n        random_selected_nodes = random.sample(list(community_directed_graph.nodes), ini_num)\n        # print('Random node selection finished.')\n        index = 1\n        for current_node in list(random_selected_nodes):\n            walk_node = current_node\n            for current_step in range(step):\n                # Find the neighbors that can influence the current node\n                current_neighbors = nx.neighbors(community_reversed_graph, walk_node)\n                # Record them in a loop\n                for neighbor in list(current_neighbors):\n                    if neighbor in pot_nodes:\n                        pot_nodes[neighbor] += 1\n                    else:\n                        pot_nodes[neighbor] = 1\n                # Find all neighbor nodes and randomly pick one as the start of the next step\n                node_neighbors 
= list(community_directed_graph.neighbors(walk_node))\n                nei_count = len(node_neighbors)\n                # If the walk cannot continue from this node, stop and move on to the next node\n                if nei_count == 0:\n                    break\n                walk_node = node_neighbors[random.randint(0, nei_count - 1)]\n            # print('Random walk for node %d finished.' % index)\n            index += 1\n\n    # print('Random walk and reverse reachability finished.')\n    # Sort the nodes by how many times they appeared during propagation and output the top-ranked ones\n    sorted_nodes = sorted(pot_nodes.items(), key=lambda d: d[1], reverse=True)\n    if len(sorted_nodes) < rank:\n        rank = len(sorted_nodes)\n    for sorted_index in range(rank):\n        res.append(sorted_nodes[sorted_index][0])\n    # print('Node selection finished.')\n    return res\n\n\ndef get_nodes_by_degree(g, com_nodes, rank):\n    '''\n    :param g: the social network\n    :param com_nodes: the nodes contained in the community\n    :param rank: the number of nodes to select\n    :return:\n    '''\n    g_com = nx.subgraph(g, com_nodes)\n    degree_record = []\n    res = []\n    for node in com_nodes:\n        temp = []\n        temp.append(node)\n        temp.append(g_com.degree(node))\n        degree_record.append(temp)\n    degree_record = sorted(degree_record, key=lambda d: d[1], reverse=True)\n\n    for i in range(rank):\n        res.append(degree_record[i][0])\n\n    return res\n\n\ndef get_digg_nodes_by_degree(g, rank):\n    '''\n    :param g: the social network\n    :param rank: the number of nodes to select\n    :return:\n    '''\n    g_nodes = list(g.nodes).copy()\n    degree_record = []\n    res = []\n    for node in g_nodes:\n        temp = []\n        temp.append(node)\n        temp.append(g.degree(node))\n        degree_record.append(temp)\n    degree_record = sorted(degree_record, key=lambda d: d[1], reverse=True)\n\n    for i in range(rank):\n        res.append(degree_record[i][0])\n    return res\n\n\n# Randomly select nodes in the community (tested) (20190207: can be replaced by Python's built-in sampling; deprecated)\ndef get_nodes_random(community_detected, num):\n    selected_nodes = {}\n    community_detected = nx.Graph(community_detected)\n    nodes = community_detected.nodes\n    number_of_nodes = len(nodes)\n\n    current_node_num = 0\n    while current_node_num < num:\n        # Randomly choose a node index\n        index = random.randint(0, number_of_nodes - 1)\n        if list(nodes)[index] in selected_nodes:\n            continue\n        else:\n            selected_nodes[list(nodes)[index]] = 1\n            current_node_num += 1\n    return selected_nodes\n\n\ndef test_random():\n    # Unit test: randomly select nodes from the given network\n    nodes = [0, 1, 2, 3, 4, 5, 6]\n    edges = [(1, 0), (3, 0), (5, 0), (0, 2), (5, 4), (2, 6), (4, 6)]\n    graph = nx.Graph()\n    graph.add_nodes_from(nodes)\n    graph.add_edges_from(edges)\n\n    node_select = get_nodes_random(graph, 4)\n    print(node_select)\n    print(list(node_select))\n\n\nif __name__ == '__main__':\n    nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n    edges = [(1, 2), (1, 4), (2, 3), (2, 6), (3, 5), (3, 7), (4, 2), (4, 3), (4, 8), (6, 3), (8, 7), (9, 7), (9, 5)]\n    graph = nx.DiGraph()\n    graph.add_nodes_from(nodes)\n    graph.add_edges_from(edges)\n    nodes_set = {1, 2, 3, 4, 6}\n    res = get_nodes_by_degree(graph, nodes_set, 2)\n    print(res)\n","sub_path":"pre_select.py","file_name":"pre_select.py","file_ext":"py","file_size_in_byte":9978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"55392536","text":"#!/usr/bin/python3\n\nfrom PyQt5.QtCore import QDateTime\nfrom transactions.TransactionEntry import TransactionEntry\nimport copy\n\n\nclass Transaction():\n    TYPE_TRADE = 0\n    TYPE_WITHDRAWAL = 1\n    TYPE_DEPOSIT = 2\n    TYPE_TRANFER = 3\n    TYPE_NOTYPE = 255\n\n    def __init__(self):\n        self.id = 0\n        self.timestamp = QDateTime.currentDateTime()\n        self.comment = \"\"\n        self.type = self.TYPE_NOTYPE\n        self.contents = [0, 0, 0]\n        self.buy = TransactionEntry()\n        self.sell = TransactionEntry()\n        self.fee = TransactionEntry()\n\n    def clearTransaction(self):\n        self.id = -1\n        self.type = Transaction.TYPE_NOTYPE\n        self.comment = \"\"\n        self.timestamp = 
QDateTime.currentDateTime()\n        self.buy = TransactionEntry()\n        self.sell = TransactionEntry()\n        self.fee = TransactionEntry()\n        for i in range(0, len(self.contents)):\n            self.contents[i] = 0\n\n    def contains(self, type):\n        return self.contents[type]\n","sub_path":"crypto-gains/transactions/Transaction.py","file_name":"Transaction.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"380478900","text":"import numpy as np\nimport h5py\nimport sys\nimport matplotlib.pyplot as plt\n\nfrom utils import plotters\n\ndef make_mask_rect(im_shape, mask_params):\n    mask = np.zeros( im_shape ) + 1\n    if type(mask_params) != type([]):\n        mask_params = [mask_params]\n    for m_params in mask_params:\n        tmp_mask = np.zeros( im_shape ) + m_params.outside\n        tmp_mask[ m_params.xstart:m_params.xstop, m_params.ystart:m_params.ystop ] = m_params.inside\n        mask = mask * tmp_mask\n    return mask\n\n\nclass mask_def(object):\n    def __init__(self, vals):\n        self.xstart = vals[0] \n        self.xstop = vals[1]\n        self.ystart = vals[2]\n        self.ystop = vals[3]\n        self.inside = 0.0+vals[4]\n        self.outside = 1.0-self.inside\n\ndef quick_mask(shape, params):\n    f = open(params,'r')\n    defs = []\n    for line in f:\n        keys = line.split()\n        tmp = []\n        for key in keys:\n            tmp.append( int(key) )\n        defs.append( mask_def(tmp) )\n    f.close()\n    mask = np.zeros(shape) + 1\n    for md in defs:\n        mask = mask*make_mask_rect(shape, md)\n    return mask\n\n\ndef run(f,params):\n    img = h5py.File(f,'r')['/template/median'].value\n    defs = []\n    f = open(params,'r')\n    for line in f:\n        keys = line.split()\n        tmp = []\n        for key in keys:\n            tmp.append( int(key) )\n        defs.append( mask_def(tmp) )\n    f.close()\n\n    mask = img*0.0+1\n    for md in defs:\n        mask = mask*make_mask_rect(img.shape, md)\n    np.save( 'mask', mask )\n    fname= 'tmp_mask.png'\n    plotters.plot_equalized_template( mask*img, fname, True ) \n\n\n\nif __name__ == \"__main__\":\n    run( sys.argv[1], sys.argv[2] ) \n","sub_path":"utils/mask_tools.py","file_name":"mask_tools.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"121413738","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass BinarySVM():\n    def __init__(self, X, y, gd_alpha=0.001, alpha=10., batch_size=200, epoch=100):\n        \"\"\"This function defines the graph of the SVM model\"\"\"\n        # define training values\n        self.X = X\n        self.y = y\n        self.data = tf.placeholder(shape=[None, len(self.X[0])], dtype=tf.float32, name='data')\n        self.target = tf.placeholder(shape=[None, 1], dtype=tf.float32, name='target')\n        self._optimize = None\n        self._prediction = None\n        self._accuracy = None\n        self.gd_alpha = gd_alpha\n        self.alpha = alpha\n        self.batch_size = batch_size\n        self.epoch = epoch\n        num_of_attr = len(self.X[0])\n        self.__weights = tf.Variable(tf.random_normal(shape=[num_of_attr, 1]), name='Weights')\n        self.__bias = tf.Variable(tf.random_normal(shape=[1, 1]), name='bias')\n\n    @property\n    def prediction(self):\n        \"\"\"If the prediction property runs for the first time it will build the model output from the weights and bias and cache it in the _prediction variable \"\"\"\n        if self._prediction is None:\n            model_output = tf.subtract(tf.matmul(self.data, self.__weights), self.__bias, name='Model_Output')\n            self._prediction = model_output\n        return self._prediction\n\n    @property\n    def optimize(self):\n        \"\"\"This function is used to optimize the weights and bias of the model \"\"\"\n        if 
self._optimize is None:\n            l2_norm = tf.reduce_sum(tf.square(self.__weights))\n            classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(self.target, self.prediction))))\n            loss = tf.add(classification_term, tf.multiply(self.alpha, l2_norm))\n            myopt = tf.train.GradientDescentOptimizer(self.gd_alpha)  # use the configured learning rate instead of a hard-coded value\n            train_step = myopt.minimize(loss)\n            self._optimize = train_step\n        return self._optimize\n\n    @property\n    def accuracy(self):\n        \"\"\"Calculates the accuracy based on given attributes and target value\"\"\"\n        if self._accuracy is None:\n            prediction = tf.sign(self.prediction)\n            self._accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, self.target), tf.float32))\n        return self._accuracy\n\n    def fit(self):\n        sess = tf.Session()\n        init = tf.global_variables_initializer()\n        sess.run(init)\n        acc_temp = list()\n        for i in range(self.epoch):\n            rand_index = np.random.choice(len(self.X), size=self.batch_size)\n            rand_x = self.X[rand_index]\n            rand_y = np.transpose([self.y[rand_index]])\n            sess.run(self.optimize, feed_dict={self.data: rand_x, self.target: rand_y})\n            rand_index = np.random.choice(len(self.X), size=self.batch_size)\n            rand_x = self.X[rand_index]\n            rand_y = np.transpose([self.y[rand_index]])\n            test_accuracy = sess.run(self.accuracy, feed_dict={self.data: rand_x, self.target: rand_y})\n            print(\"The accuracy at epoch\",i,\"=\",test_accuracy)\n","sub_path":"BinarySVM.py","file_name":"BinarySVM.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"606415284","text":"#Person class\r\nclass person :\r\n    def __init__ (self, id, profession, garlicLover, maxVisitors) :\r\n        self.id = id\r\n        self.profession = profession\r\n        self.garlicLover = garlicLover\r\n        self.maxVisitors = maxVisitors\r\n        self.possibleRoomsList = []\r\n        self.inRoom = None\r\n        self.roomConstrainValue = {}\r\n\r\n    def printPerson(self) :\r\n        print(\"PERSON:\", \"ID:\", self.id, \", Profession:\", self.profession, \", GarlicLover:\", self.garlicLover, \", MaxVisitors:\", self.maxVisitors, \", Room Ref:\", self.inRoom)\r\n        for possibleRooms in self.possibleRoomsList :\r\n            print(\"Possible Rooms: \", possibleRooms.id)\r\n\r\n    #def addPossibleRoomsToList(self, room) :\r\n    #    if CheckConstraints(self, room) :\r\n    #        self.possibleRoomsList.append(room)\r\n\r\n    #def removeRoomsByConstraints(self) :\r\n    #    for rooms in self.possibleRoomsList :\r\n    #        if not CheckConstraints(self, rooms) :\r\n    #            self.possibleRoomsList.remove(rooms)\r\n\r\n    \r\n\r\n#End of Person class\r\n\r\n#Room class\r\nclass room :\r\n    def __init__ (self, id, workPlaces, visitorPlaces) :\r\n        self.id = id\r\n        self.workPlaces = workPlaces\r\n        self.visitorPlaces = visitorPlaces\r\n        self.near = []\r\n        self.workersList = []\r\n        #self.constrainingNumber = {}\r\n\r\n    #Room class member functions\r\n    def AddNearRoom(self, room2) :\r\n        self.near.append(room2)\r\n\r\n    def printRoom(self) :\r\n        print(\"ROOM:\", \"ID:\", self.id, \", WorkPlaces:\", self.workPlaces, \", Visitors:\", self.visitorPlaces)\r\n        for worker in self.workersList :\r\n            print(\"Person ref: \", worker.id)\r\n#End of Room class\r\n\r\n#List class\r\nclass List :\r\n    roomsList = []\r\n    personsList = []\r\n    personInRoomsDict = {} \r\n\r\n    #List class member functions\r\n    def printRoomsList(self) :\r\n        for rooms in self.roomsList :\r\n            rooms.printRoom()\r\n\r\n    def printPersonsList(self) :\r\n        for persons in self.personsList :\r\n            persons.printPerson()\r\n\r\n    def ReadroomsFromFile(self) :\r\n        dataFile = 
open(\"rooms.txt\")\r\n for dataLine in dataFile.readlines() :\r\n dataLine = dataLine.split(';')\r\n self.roomsList.append(room(dataLine[0],int(dataLine[1]),int(dataLine[2])))\r\n dataFile.close()\r\n del dataFile\r\n\r\n def ReadpersonsFromFile(self) :\r\n dataFile = open(\"persons.txt\")\r\n for dataLine in dataFile.readlines() :\r\n dataLine = dataLine.split(';')\r\n garlic = str2bool(dataLine[2])\r\n self.personsList.append(person(dataLine[0],dataLine[1],garlic,int(dataLine[3])))\r\n dataFile.close()\r\n del dataFile\r\n\r\n def ReadConnectionsFromFile(self) :\r\n dataFile = open(\"roomsConnect.txt\")\r\n for dataLine in dataFile.readlines() :\r\n dataLine = dataLine.split(';')\r\n room1 = self.GetRoomById(dataLine[0])\r\n room2 = self.GetRoomById(dataLine[1][:-1])\r\n room1.AddNearRoom(room2)\r\n dataFile.close()\r\n del dataFile\r\n\r\n def GetRoomById(self, id) :\r\n for room in self.roomsList :\r\n if room.id == id :\r\n return room\r\n\r\n def updatePossibleRooms(self) :\r\n for persons in self.personsList :\r\n for rooms in self.roomsList :\r\n if CheckConstraints(persons, rooms) and (rooms not in persons.possibleRoomsList) :\r\n persons.possibleRoomsList.append(rooms)\r\n elif not CheckConstraints(persons, rooms) and (rooms in persons.possibleRoomsList) :\r\n persons.possibleRoomsList.remove(rooms)\r\n\r\n def findMostConstrainedPerson(self) :\r\n mostConstrainedPerson = None\r\n shortestLenght = len(self.roomsList)+1\r\n noneInRoom = True\r\n for persons in self.personsList :\r\n if persons.inRoom != None :\r\n noneInRoom = False\r\n if noneInRoom == True :\r\n for persons in self.personsList :\r\n if mostConstrainedPerson == None and persons.inRoom == None and persons.profession == \"Boss\" :\r\n mostConstrainedPerson = persons\r\n shortestLenght = len(persons.possibleRoomsList)\r\n elif len(persons.possibleRoomsList) < shortestLenght and persons.inRoom == None and persons.profession == \"Boss\":\r\n mostConstrainedPerson = persons\r\n shortestLenght = len(persons.possibleRoomsList)\r\n else :\r\n for persons in self.personsList :\r\n if mostConstrainedPerson == None and persons.inRoom == None :\r\n mostConstrainedPerson = persons\r\n shortestLenght = len(persons.possibleRoomsList)\r\n elif len(persons.possibleRoomsList) < shortestLenght and persons.inRoom == None:\r\n mostConstrainedPerson = persons\r\n shortestLenght = len(persons.possibleRoomsList)\r\n return mostConstrainedPerson\r\n\r\n def findLeastConstrainingRoom(self, mostConstPerson) : \r\n temp = 0\r\n tempRoomList = list(mostConstPerson.possibleRoomsList)\r\n for rooms in tempRoomList :\r\n AllocatePersonsToRooms(mostConstPerson, rooms)\r\n self.updatePossibleRooms()#\r\n temp = self.findNumberOfConstraines(mostConstPerson)\r\n mostConstPerson.roomConstrainValue[rooms] = temp\r\n deAllocatePersonsToRooms(mostConstPerson, rooms)\r\n self.updatePossibleRooms()\r\n #for keys in mostConstPerson.roomConstrainValue.keys() :\r\n # print(keys.id, mostConstPerson.roomConstrainValue)\r\n\r\n def findNumberOfConstraines(self, person) :\r\n numberOfConstraints = 0\r\n for persons in self.personsList :\r\n if person != persons :\r\n numberOfConstraints = numberOfConstraints + len(persons.possibleRoomsList)\r\n return numberOfConstraints\r\n\r\n def sortConstraningRooms(self, Person) :\r\n tempDict = {}\r\n Person.possibleRoomsList = []\r\n while len(Person.roomConstrainValue) != 0 :\r\n tempRoom = max(Person.roomConstrainValue, key=Person.roomConstrainValue.get)\r\n Person.possibleRoomsList.append(tempRoom)\r\n 
Person.roomConstrainValue.pop(tempRoom)\r\n #print(\"yolo\", tempRoom)\r\n\r\n#End of List class\r\n\r\n#Constraints\r\n#Takes the room the boss sits in and checks if the boss works alone in there.\r\ndef bossAloneInOfficeConstraint(person, room) :\r\n if person.profession == \"Boss\" :\r\n if len(room.workersList) > 0 :\r\n return False\r\n else :\r\n return True\r\n elif len(room.workersList) != 0 :\r\n if room.workersList[0].profession == \"Boss\" :\r\n return False\r\n else :\r\n return True\r\n else :\r\n return True\r\n\r\ndef garlicLoversSitTogetherConstraint(person, room) :\r\n if person.garlicLover :\r\n for workers in room.workersList :\r\n if not workers.garlicLover :\r\n return False\r\n else :\r\n return True\r\n else :\r\n for workers in room.workersList :\r\n if not workers.garlicLover :\r\n return True\r\n else :\r\n return False\r\n return True\r\n\r\ndef secretaryAndItSupportNextToBossConstraint(person, room) :\r\n if person.profession == \"Secretary\" or person.profession == \"It-support\" :\r\n for nextToRoom in room.near :\r\n for worker in nextToRoom.workersList :\r\n if worker.profession == \"Boss\" :\r\n return True\r\n else : \r\n return False\r\n else :\r\n return True\r\n\r\ndef enoughVisitorSpaceConstraint(person, room) :\r\n if person.maxVisitors <= room.visitorPlaces :\r\n sumVisitors = 0\r\n for workers in room.workersList :\r\n sumVisitors = sumVisitors + workers.maxVisitors\r\n if person.maxVisitors + sumVisitors <= room.visitorPlaces :\r\n return True\r\n else :\r\n return False\r\n return False\r\n\r\ndef enoughWorkPlacesConstraint(person, room) :\r\n if person.inRoom == None and len(room.workersList) < room.workPlaces :\r\n return True\r\n else:\r\n return False\r\n\r\n#End of Constrains\r\n\r\ndef allPersonsInRooms(personsList) :\r\n for persons in personsList :\r\n if persons.inRoom == None :\r\n return False\r\n return True\r\n\r\ndef backtrackingSearch(L) :\r\n return recursiveBacktracking(L)\r\n\r\ndef recursiveBacktracking(L) :\r\n if allPersonsInRooms(L.personsList) :\r\n #L.printPersonsList()\r\n return L.personInRoomsDict\r\n else :\r\n MostConstrainedPerson = L.findMostConstrainedPerson()\r\n L.findLeastConstrainingRoom(MostConstrainedPerson)\r\n L.sortConstraningRooms(MostConstrainedPerson) \r\n if CheckConstraints(MostConstrainedPerson, MostConstrainedPerson.possibleRoomsList[0]) :\r\n if MostConstrainedPerson.inRoom == None and len(MostConstrainedPerson.possibleRoomsList[0].workersList) < MostConstrainedPerson.possibleRoomsList[0].workPlaces :\r\n L.personInRoomsDict[MostConstrainedPerson] = MostConstrainedPerson.possibleRoomsList[0]\r\n AllocatePersonsToRooms(MostConstrainedPerson, MostConstrainedPerson.possibleRoomsList[0]) \r\n L.updatePossibleRooms()\r\n for keys in L.personInRoomsDict.keys() :\r\n print(\"person:\", keys.id, \"Room:\", keys.inRoom.id)\r\n print(\"----------------------------------------------\")\r\n result = recursiveBacktracking(L) \r\n print(result)\r\n if result :\r\n return result\r\n deAllocatePersonsToRooms(MostConstrainedPerson, MostConstrainedPerson.inRoom)\r\n L.personInRoomsDict.pop(MostConstrainedPerson)\r\n L.updatePossibleRooms()\r\n L.sortConstraningRooms(MostConstrainedPerson)\r\n #print(MostConstrainedPerson.possibleRoomsList)\r\n MostConstrainedPerson.possibleRoomsList.remove(MostConstrainedPerson.possibleRoomsList[0])\r\n return False\r\n\r\ndef str2bool (value) :\r\n return value.lower() in (\"true\")\r\n\r\ndef CheckConstraints(person, room) :\r\n BossConstraintTest = 
bossAloneInOfficeConstraint(person, room)\r\n #print(\"Boss Alone Test: \", BossConstraintTest)\r\n NextToBossConstraintTest = secretaryAndItSupportNextToBossConstraint(person, room)\r\n #print(\"Next to Boss Test: \", NextToBossConstraintTest)\r\n GarlicLoverConstraintTest = garlicLoversSitTogetherConstraint(person, room)\r\n #print(\"Garlic Lover Test: \", GarlicLoverConstraintTest)\r\n VisitorsPlaceConstraintTest = enoughVisitorSpaceConstraint(person, room)\r\n #print(\"Visitors Place Test: \", VisitorsPlaceConstraintTest)\r\n WorkPlacesConstraint = enoughWorkPlacesConstraint(person, room)\r\n if BossConstraintTest and NextToBossConstraintTest and GarlicLoverConstraintTest and VisitorsPlaceConstraintTest :\r\n return True\r\n else :\r\n return False\r\n\r\ndef AllocatePersonsToRooms(person, room) :\r\n person.inRoom = room\r\n room.workersList.append(person)\r\n\r\ndef deAllocatePersonsToRooms(person, room) :\r\n person.inRoom = None\r\n room.workersList.remove(person)\r\n\r\n#Main function\r\ndef main() :\r\n L = List()\r\n L.ReadroomsFromFile()\r\n L.ReadpersonsFromFile()\r\n L.ReadConnectionsFromFile()\r\n L.updatePossibleRooms()\r\n #personA = L.findMostConstrainedPerson()\r\n #personA.printPerson()\r\n #Room = L.sortLeastConstrainedRoom(personA)\r\n #Room.printRoom()\r\n result = backtrackingSearch(L)\r\n #for keys in L.personInRoomsDict.keys() :\r\n # print(\"person:\", keys.id, \"Room:\", keys.inRoom.id)\r\n #Room.printRoom()\r\n #for rooms in L.roomsList :\r\n # for persons in L.personsList :\r\n # AllocatePersonsToRooms(persons, rooms) \r\n #L.printRoomsList()\r\n #L.printPersonsList()\r\n\r\n#End of Main function\r\n\r\nmain()","sub_path":"AI/AI/Task2/Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":12023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"578884020","text":"import plotly.plotly as py\nfrom plotly.graph_objs import Bar\n\ndef plot_history(history, name):\n\tpy.sign_in('mitchhydras','wi2qaykd8l')\n\tdays = []\n\ttimes = []\n\n\tfor date in history.keys():\n\t\tdays.append(str(date).split(\" \")[0])\n\t\ttimes.append(history[date].get_mins())\n\n\ttrace0 = Bar(x=days,\n\t y=times\n\t)\n\n\tdata = [trace0]\n\n\tunique_url = py.plot(data, filename = name + \"netflix-history\")","sub_path":"plot_netflix.py","file_name":"plot_netflix.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"372349432","text":"from Panda import *\n\nsphere(size = -800, texture = \"spaceimage2.jpeg\", hpr = HPR(time/3, 0, 0))\n\nc=cube(\"brianpic1.jpg\",\"brianpic2.jpg\",\"brianpic3.jpg\",\"brianpic1.jpg\",\"brianpic2.jpg\",\"brianpic3.jpg\")\nc.hpr = HPR(time*2, sin(time), 0)\nc.position = P3(0,2,0)\nfragments = blastPicture(\"brianpic3.jpg\", 15, 15)\nfragments1 = blastPicture(\"brian-name.jpg\", 15, 15)\n\n\nfor p in fragments1:\n path = at(P3(random11()*10,-20,random01()))+move(5, P3(0,0,0)) + to((p.x+p.y*15)/70.0 + 2,p.location + P3(0,8,0))\n p.position = itime(path)\nfor p in fragments:\n path = at(p.location) + to(2+random01(),p.location) + to(3, P3((p.x-5)*randomRange(12, 22), randomRange(8, 18), randomRange(10, 20)))\n p.position = itime(path)\ncp = at(P3(0,-9,0))+to(5, P3(0,3,0))\ncamera.position=itime(cp)\nstart()","sub_path":"Demos and Tests/Student Work - 
2012/Brian1/Nametag/brian-nametag.py","file_name":"brian-nametag.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"294719787","text":"from matplotlib import pyplot as plt\nimport numpy as np\n\nplt.xkcd()\n\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nplt.xticks([])\nplt.yticks([])\nax.set_ylim([0, 10])\nax.set_xlim([0, 10])\n\nx = np.arange(0, 10, 0.1).tolist()\ny = [5] * 40\nfor i in range(40, len(x)):\n    y.append(0.1 * x[i] * x[i] - 0.8 * x[i] + 6.6)\n\nplt.annotate(\n    'Had an interview with\\n Hongkong reporters',\n    xy=(5, 5), arrowprops=dict(arrowstyle='->'), xytext=(3, 2))\n\nplt.plot(x, y)\n\nplt.xlabel('Time')\nplt.ylabel('His Life Expectancy')\n\nplt.title(\"You know who HE is\")\n\nplt.show()\n","sub_path":"xkcd/He/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"139066307","text":"import pickle\nimport pygame as pg\nimport sys\n\nfrom Assets import *\nimport State\nimport Funcs\nimport Scripts\n\n\nclass Button(pygame.sprite.Sprite):\n\n    text = '---'\n    font = 0\n\n    def __init__(self, rect):\n        pygame.sprite.Sprite.__init__(self)\n        self.image = pygame.transform.scale(menu_button, [rect[2], rect[3]])\n        self.rect = self.image.get_rect()\n        self.rect.left = rect[0]\n        self.rect.top = rect[1]\n        self.rect.width = rect[2]\n        self.rect.height = rect[3]\n        self.font = pygame.font.Font(pygame.font.get_default_font(), 23)\n\n    def select(self):\n        self.image = pygame.transform.scale(menu_button_selected,\n                                            [self.rect.width, self.rect.height])\n\n    def deselect(self):\n        self.image = pygame.transform.scale(menu_button,\n                                            [self.rect.width, self.rect.height])\n\n\nclass B_Continue(Button):\n    '1'\n\n    def __init__(self, rect):\n        super().__init__(rect)\n        self.text = 'Continue'\n\n    def action(self):\n        State.t = False\n\n\nclass B_Start_Over(Button):\n    '2'\n    def __init__(self, rect):\n        super().__init__(rect)\n        self.text = 'Start Over'\n\n    def action(self):\n        global realGuy\n        # global t\n        State.t = False\n\n        for object in State.player_group:\n\n            object.speed = [0,0]\n            object.kill()\n        for object in State.movable:\n            object.kill()\n        for object in State.interface:\n            object.kill()\n\n        realGuy = Funcs.ship_assign(State.picked_ship, State.start_lives,\n                                    player=True)\n\n        State.save['level'] = 0\n\n        Funcs.spawn_wave(realGuy)\n        Scripts.main_loop(realGuy)\n\n\nclass B_New_Game(Button):\n    '3'\n    def __init__(self, rect):\n        super().__init__(rect)\n        self.text = 'New Game'\n\n    def action(self):\n        State.level = 0\n\n        realGuy = Funcs.ship_assign(State.picked_ship, State.start_lives,\n                                    player=True)\n\n        Scripts.main_loop(realGuy)\n\n\nclass B_Stats(Button):\n    '4'\n    def __init__(self, rect):\n        super().__init__(rect)\n        with open('save.pkl', 'wb') as f:\n            pickle.dump(State.save, f, pickle.HIGHEST_PROTOCOL)\n\n    def action(self):\n        pass\n\n\nclass B_Exit(Button):\n    '5'\n    def __init__(self, rect):\n        super().__init__(rect)\n        self.text = 'Exit'\n\n    def action(self):\n        State.paused = False\n        pg.event.post(pg.event.Event(pg.QUIT, {'QUIT': True}))\n        sys.exit()\n\n\nclass B_Ship_Highlihgts(Button):\n    '6'\n    def __init__(self, rect, ship_number):\n\n        super().__init__(rect)\n        self.ship_number = ship_number\n        self.text = State.SHIPS_TEXTS[ship_number]\n\n        self.main_image = SHIPS_IMGS[ship_number]\n        ship_rect = self.main_image.get_rect()\n        
self.ship_img_pos = (rect[2]//2 - ship_rect.width//2,\n                             rect[3]//2 - ship_rect.height//2)\n\n        self.image = pygame.transform.scale(menu_button,\n                                            [self.rect.width, self.rect.height])\n\n        self.image.blit(self.main_image, (self.ship_img_pos[0],\n                                          self.ship_img_pos[1]))\n\n    def action(self):\n\n        State.picked_ship = self.ship_number\n\n    def select(self):\n        self.image = pygame.transform.scale(menu_button_selected,\n                                            [self.rect.width, self.rect.height])\n\n        self.image.blit(self.main_image, (self.ship_img_pos[0],\n                                          self.ship_img_pos[1]))\n\n    def deselect(self):\n        self.image = pygame.transform.scale(menu_button,\n                                            [self.rect.width, self.rect.height])\n        self.image.blit(self.main_image, (self.ship_img_pos[0],\n                                          self.ship_img_pos[1]))\n","sub_path":"Buttons.py","file_name":"Buttons.py","file_ext":"py","file_size_in_byte":3940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} {"seq_id":"641720047","text":"# -*- coding:utf-8-*-\n'''\n@Description: In User Settings Edit\n@Author: zhansu\n@Date: 2019-09-03 19:21:14\n@LastEditTime: 2019-09-04 14:59:00\n@LastEditors: Please set LastEditors\n'''\nimport numpy as np\nimport random\nimport os\nimport math\nimport pandas as pd\nimport warnings\nimport time\nimport pickle\nfrom collections import Counter\nfrom functools import wraps\nimport nltk\nfrom nltk.corpus import stopwords\nfrom numpy.random import seed\nimport math\n\nseed(1234)\n\nstopwords = stopwords.words(\"english\")\n\n\ndef log_time_delta(func):\n    @wraps(func)\n    def _deco(*args, **kwargs):\n        start = time.time()\n        ret = func(*args, **kwargs)\n        end = time.time()\n        delta = end - start\n        print(\"%s ran %.2f seconds\" % (func.__name__, delta))\n        return ret\n    return _deco\n\n\ndef cut(sentence):\n\n    tokens = sentence.lower().split()\n    # tokens = [word for word in sentence.split() if word not in stopwords]\n    return tokens\n\n\ndef get_alphabet(corpuses):\n    \"\"\"\n    obtain the word dict\n    :param corpuses: the corpora (DataFrames with a 'question' column) to build the dict from\n    \"\"\"\n    word_counter = Counter()\n\n    for corpus in corpuses:\n        for sentence in corpus[\"question\"].unique():\n            tokens = cut(sentence)\n            for token in tokens:\n                word_counter[token] += 1\n    print(\"there are {} words in dict\".format(len(word_counter)))\n    word_dict = {word: e + 2 for e, word in enumerate(list(word_counter))}\n    word_dict['UNK'] = 1\n    word_dict[''] = 0\n\n    return word_dict\n\n\ndef get_embedding(alphabet, filename=\"\", embedding_size=100):\n    \"\"\"\n    docstring here\n    :param alphabet: word_dict of the train_dataset\n    :param filename=\"\": filename of the embedding\n    :param embedding_size=100: embedding_size\n    \"\"\"\n    embedding = np.random.randn(len(alphabet), embedding_size)\n    if filename == \"\":\n        print(\"random embedding\")\n        return embedding\n    with open(filename, encoding='utf-8') as f:\n        i = 0\n        for line in f:\n            i += 1\n            if i % 100000 == 0:\n                print('epoch %d' % i)\n            items = line.strip().split(' ')\n            if len(items) == 2:\n                vocab_size, embedding_size = items[0], items[1]\n                print((vocab_size, embedding_size))\n            else:\n                word = items[0]\n                if word in alphabet:\n                    embedding[alphabet[word]] = items[1:]\n\n    print('done')\n\n    return embedding\n\n\n@log_time_delta\ndef prepare(corpuses, dim=50):\n    \"\"\"\n    docstring here\n    :param corpuses: dataset\n    :param dim=50: embedding dimension\n    \"\"\"\n# get the word_dict\n    alphabet = get_alphabet(corpuses)\n    # embedding_file = '/Users/zhansu/program/code/embedding/glove.6B/glove.6B.50d.txt'\n    embedding_file = \"\"\n    # get the embedding of the dataset\n    sub_embeddings = get_embedding(\n        alphabet, 
filename=embedding_file, embedding_size=5)\n return alphabet, sub_embeddings\n\n\ndef get_lookup_table(embedding_params):\n id2word = embedding_params['id2word']\n word_vec = embedding_params['word_vec']\n lookup_table = []\n\n # Index 0 corresponds to nothing\n lookup_table.append([0] * embedding_params['wvec_dim'])\n for i in range(1, len(id2word)):\n word = id2word[i]\n wvec = [0] * embedding_params['wvec_dim']\n if word in word_vec:\n wvec = word_vec[word]\n # print(wvec)\n lookup_table.append(wvec)\n\n lookup_table = np.asarray(lookup_table)\n return(lookup_table)\n\n\ndef convert_to_word_ids(sentence, alphabet, max_len=40):\n \"\"\"\n docstring here\n :param sentence: \n :param alphabet: \n :param max_len=40: \n \"\"\"\n indices = []\n tokens = cut(sentence)\n\n for word in tokens:\n if word in alphabet:\n indices.append(alphabet[word])\n else:\n continue\n result = indices + [alphabet['']] * (max_len - len(indices))\n\n return result[:max_len]\n\n\ndef load(data_dir):\n \"\"\"\n docstring here\n loading the dataset\n :param data_dir: the data dir\n \"\"\"\n train_df = pd.read_csv(os.path.join(data_dir, 'train.csv'), header=None, sep=\"\\t\", names=[\n \"question\", \"flag\"], quoting=3).fillna(\"WASHINGTON\")\n dev_df = pd.read_csv(os.path.join(data_dir, 'dev.csv'), header=None, sep=\"\\t\", names=[\n \"question\", \"flag\"], quoting=3).fillna(\"WASHINGTON\")\n test_df = pd.read_csv(os.path.join(data_dir, 'test.csv'), header=None, sep=\"\\t\", names=[\n \"question\", \"flag\"], quoting=3).fillna(\"WASHINGTON\")\n return train_df, dev_df, test_df\n\n\ndef gen_with_pair_single(df, alphabet, sen_len):\n \"\"\"\n docstring here get the single batch of dataset\n :param df: dataset\n :param alphabet: word_dict\n :param sen_len: sentence length\n \"\"\"\n pairs = []\n for _, row in df.iterrows():\n sentence_indice = convert_to_word_ids(\n row['question'], alphabet, max_len=sen_len)\n label = transform(row[\"flag\"])\n\n pairs.append((sentence_indice, label))\n return pairs\n\n\ndef batch_iter(data, batch_size, alphabet, shuffle=False, sen_len=33):\n \"\"\"\n docstring here\n :param data: dataset\n :param batch_size: batch_size\n :param alphabet: word_dict\n :param shuffle=False: \n :param sen_len=33: \n \"\"\"\n data = gen_with_pair_single(\n data, alphabet, sen_len)\n\n data = np.array(data)\n data_size = len(data)\n\n if shuffle:\n shuffle_indice = np.random.permutation(np.arange(data_size))\n data = data[shuffle_indice]\n\n num_batch = int((data_size - 1) / float(batch_size)) + 1\n\n for i in range(num_batch):\n start_index = i * batch_size\n end_index = min((i + 1) * batch_size, data_size)\n\n yield data[start_index:end_index]\n\n\ndef transform(flag):\n if flag == 1:\n return [0, 1]\n else:\n return [1, 0]\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"97525298","text":"\"\"\"\n\n Student : Shahreen Shahjahan Psyche\n\n\"\"\"\n\nclass Node:\n \"\"\"\n Run Time Complexities of each functions are as follows:\n\n\n def push : O(1)\n def pop : O(N)\n\n Memory Complexity:\n\n O(len(size of the linked list)) or O(N)\n\n\n The code ran successfully!\n \"\"\"\n def __init__(self, data):\n self.data = data\n self.next = None\n \nclass Stack:\n def __init__(self):\n\n # Initializing the place holders for the root of the stack and the last address of the stack.\n self.root_adr = None\n self.curr_adr = None\n\n def push(self, data):\n new_node = 
Node(data)\n\n # Creating a new node and saving it in the root_adr when root is none.\n if self.root_adr is None:\n self.root_adr = new_node\n self.curr_adr = self.root_adr\n\n # Otherwise creating a new node and saving it to the next position of the curr_adr node.\n else:\n self.curr_adr.next = new_node\n self.curr_adr = self.curr_adr.next\n\n def pop(self):\n\n # Checking whether the root node is empty.\n if self.root_adr is None:\n return None\n\n return_node = self.curr_adr\n val = return_node.data\n\n\n # Checking whether there is only 1 value in the stack.\n if self.root_adr == self.curr_adr:\n self.root_adr = None\n self.curr_adr = None\n return val\n\n # Iterating through the stack until the second last Node. Then making None the next pointer of the second last node.\n current_node = self.root_adr\n while(current_node is not None):\n if current_node.next == return_node:\n current_node.next = None\n self.curr_adr = current_node\n break\n else:\n current_node = current_node.next\n del return_node\n return val\n\n\n \na_stack = Stack()\nwhile True:\n print('push ')\n print('pop')\n print('quit')\n do = input('What would you like to do? ').split()\n \n operation = do[0].strip().lower()\n if operation == 'push':\n a_stack.push(int(do[1]))\n elif operation == 'pop':\n popped = a_stack.pop()\n if popped is None:\n print('Stack is empty.')\n else:\n print('Popped value: ', int(popped))\n elif operation == 'quit':\n break\n","sub_path":"Exercise_2.py","file_name":"Exercise_2.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"277641285","text":"import sys\nimport os.path\nimport os\nimport time\n''' Tester only for developers purpose!!! Not for real student tasks.'''\nsolution_name = \"solution.cpp\"\ntime_limit = 2\n\ndef print_error(text):\n print('\\033[31;1m' + text + '\\033[m')\n\ndef print_info(text):\n print('\\033[34;1m' + text + '\\033[m')\n\ndef print_good(text):\n print('\\033[32;1m' + text + '\\033[m')\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) > 1:\n solution_name = sys.argv[1]\n\n print_info(\"Solution {solution_name} will be tested!\".format(solution_name=solution_name))\n \n solution_path = \"solution_to_test/\" + solution_name\n \n if not os.path.exists(solution_path):\n print_error(\"File {name} not exists!\".format(name=solution_path))\n exit(1)\n #build solution to test\n os.system(\"(cd solution_to_test/ && PROJECT={solution_name} make)\".format(solution_name=solution_name))\n\n cnt_success_tests = 0\n for test in range(1, 51, 1):\n in_test = \"tests/in/\" + str(test)\n out_test = \"tests/out/\" + str(test)\n res_test = \"tests/result/\" + str(test)\n\n if not os.path.exists(in_test):\n print_error(\"Error no exists: {file}\".format(file=in_test))\n exit(1)\n\n if not os.path.exists(out_test):\n print_error(\"Error no exists: {file}\".format(file=out_test))\n exit(1)\n \n project_name = solution_name[:solution_name.find('.')]#cut file extention\n ###start solution with in_test input and res_test output\n ###tmp has program time execution\n #\n os.system(\"{ \"+(\"time ./solution_to_test/{project_name} <{in_test} >{res_test}\".\\\n format(project_name=project_name, in_test=in_test, res_test=res_test))+\" ; } 2> tmp\")\n #\n execution_time = 0\n #read execution time from tmp file\n with open('tmp') as file:\n line = file.readline()\n #time format has 4 symbols, example 3.32system\n execution_time = float(line[line.find('system') - 4: line.find('system')])\n\n 
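# Fragile parse: relies on the seconds field being exactly the 4 characters before 'system' in time's output\n        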
print(\"##########################################\")\n print(\"Test {test} finished\".format(test=str(test)))\n print(\"Time: \" + str(execution_time))\n \n cmp_res = os.system(\"cmp {out_test} {res_test}\".format(out_test=out_test, res_test=res_test))\n if cmp_res != 0 or execution_time > time_limit:\n print_error(\"Not success\")\n else:\n print_good(\"OK\")\n cnt_success_tests += 2\n \n print_info(\"Result: {res}\".format(res=cnt_success_tests))\n\n \n\n","sub_path":"infection/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"96575533","text":"#siva\nn=int(input())\nsumm=0\nk=n\nwhile n>0:\n r=n%10\n summ=summ+r**3\n n=n//10\nprint(summ)\nif summ==k:\n print('yes')\nelse:\n print('not')\n\n","sub_path":"arms.py","file_name":"arms.py","file_ext":"py","file_size_in_byte":150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"476152045","text":"from pydataweaver.lib.scripts import SCRIPT_LIST, get_script\n\n\ndef datasets(keywords=None, licenses=None):\n \"\"\"Return list of all available datasets.\"\"\"\n script_list = SCRIPT_LIST()\n\n if not keywords and not licenses:\n return sorted(script_list, key=lambda s: s.name.lower())\n\n result_scripts = set()\n if licenses:\n licenses = [l.lower() for l in licenses]\n for script in script_list:\n if script.name:\n if licenses:\n # get a list of all licenses in lower case present in the scripts\n script_license = [lc.lower for lc in sum(script.licenses.values(), [])]\n\n if script_license and set(script_license).intersection(set(licenses)):\n result_scripts.add(script)\n continue\n if keywords:\n script_keywords = script.title + \" \" + script.name\n if script.keywords:\n script_keywords = script_keywords + \" \" + \"-\".join(script.keywords)\n script_keywords = script_keywords.lower()\n for k in keywords:\n if script_keywords.find(k.lower()) != -1:\n result_scripts.add(script)\n break\n\n return sorted(list(result_scripts), key=lambda s: s.name.lower())\n\n\ndef dataset_names():\n \"\"\"Return list of all available dataset names.\"\"\"\n all_scripts = datasets()\n scripts_name = []\n\n for script in all_scripts:\n scripts_name.append(script.name)\n\n return scripts_name\n\n\ndef license(dataset):\n \"\"\"Get the license for a dataset.\"\"\"\n return get_script(dataset).licenses\n\n\ndef dataset_licenses():\n \"\"\"Return set with all available licenses.\"\"\"\n script_license = []\n for script in SCRIPT_LIST():\n temp_list = [lc.lower for lc in sum(script.licenses.values(), [])]\n script_license.append(temp_list)\n return set(script_license)\n","sub_path":"pydataweaver/lib/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"89271637","text":"# -*- coding: utf-8 -*-\n###############################################################################\n#\n# Tech-Receptives Solutions Pvt. 
Ltd.\n# Copyright (C) 2009-TODAY Tech-Receptives().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see .\n#\n###############################################################################\n\nfrom openerp import models, fields\nfrom openerp import models, fields, api, _\n\nclass Student(models.Model):\n\n _inherit = 'res.partner'\n\n library_card_id = fields.Many2one('op.library.card', 'Library Card')\n book_movement_lines = fields.One2many(\n 'op.book.movement', 'student_id', 'Movements') \n \n \n @api.multi\n def open_report(self):\n '''print library catd of student '''\n \n value = {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'openeducat_library.report_student_library_card',\n 'datas': {\n 'model': 'res.partner',\n 'id': self.id,\n 'ids': [self.id],\n 'report_type': 'pdf',\n 'report_file': 'openeducat_library.report_student_library_card'\n },\n 'nodestroy': True\n \n }\n return value\n\nclass OpStudent(models.Model):\n _inherit = 'op.student'\n\n library_card_id = fields.Many2one('op.library.card', 'Library Card')\n book_movement_lines = fields.One2many(\n 'op.book.movement', 'student_id', 'Movements')\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"openeducat_library/models/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"46591655","text":"import json\nimport unittest\nfrom unittest import mock\n\nfrom flask import Flask\nfrom nap.cache import django_cache, flask_cache\nfrom nap.cache.base import (\n DEFAULT_TIMEOUT,\n MAX_CACHE_KEY_LENGTH,\n BaseCacheBackend\n)\nfrom tests import SampleCacheableResource, SampleResourceModel\n\n\nclass TestBaseCacheBackend:\n def get_backend(self, **kwargs):\n defaults = {\n 'default_timeout': DEFAULT_TIMEOUT,\n 'obey_cache_headers': True,\n }\n defaults.update(kwargs)\n\n return BaseCacheBackend(**defaults)\n\n def get_fake_request(self, **kwargs):\n defaults = {\n 'url': 'http://www.foo.com/bar/',\n }\n\n defaults.update(kwargs)\n mock_request = mock.Mock()\n for attr, val in defaults.items():\n setattr(mock_request, attr, val)\n\n return mock_request\n\n def get_fake_response(self, **kwargs):\n defaults = {\n 'status_code': 200,\n 'url': 'http://www.foo.com/bar/',\n 'headers': {},\n }\n\n defaults.update(kwargs)\n mock_response = mock.Mock()\n for attr, val in defaults.items():\n setattr(mock_response, attr, val)\n\n return mock_response\n\n def test_get_cache_key(self):\n obj = SampleResourceModel(\n title='expected_title',\n content='Blank Content'\n )\n cache_backend = self.get_backend()\n\n uri = SampleResourceModel.objects.get_lookup_url(resource_obj=obj)\n url = SampleResourceModel.objects.get_full_url(uri)\n key = cache_backend.get_cache_key(SampleResourceModel, url)\n assert key == \"note::http://foo.com/v1/expected_title/\"\n\n def test_cache_key_very_long_is_hashed(self):\n # Given: A 
cache backend and a very long URL\n cache_backend = self.get_backend()\n url = (\n 'https://a.very.long.url.should.contain.a.famous.quote.such.as.'\n 'Government.of.the people,by.the.people,for.the.people,shall.not.'\n 'perish.from.the.Earth.'\n 'Hmmm.That.wasnt.long.enough.How.About.'\n 'And.in.the.end,its.not.the.years.in.your.life.that.count..Its.'\n 'the.life.in.your.years.'\n 'Abraham.Lincoln.com'\n )\n assert len(url) > MAX_CACHE_KEY_LENGTH\n cache_backend.get_cache_key(SampleResourceModel, url)\n SampleResourceModel(\n title='expected_title',\n content='Blank Content'\n )\n expected_key = 'note::020b8d3c397af5f2ade0a683608068ee'\n cache_key = cache_backend.get_cache_key(SampleResourceModel, url)\n assert len(cache_key) < MAX_CACHE_KEY_LENGTH\n assert cache_key == expected_key\n\n def test_get_cache_key_with_parameters(self):\n kwargs = {'c': 1, 'b': 2, 'a_list': [5, 4, 3]}\n obj = SampleResourceModel(\n title='expected_title',\n content='Blank Content',\n )\n cache_backend = self.get_backend()\n\n uri = SampleResourceModel.objects.get_lookup_url(\n resource_obj=obj, **kwargs\n )\n url = SampleResourceModel.objects.get_full_url(uri)\n key = cache_backend.get_cache_key(SampleResourceModel, url)\n expected_value = (\n 'note::http://foo.com/v1/expected_title/?a_list=5&a_list=4&'\n 'a_list=3&b=2&c=1'\n )\n assert key == expected_value\n\n def test_get_timeout_from_header(self):\n cache_backend = self.get_backend()\n headers = {\n 'cache-control': 'public, max-age=2592000'\n }\n mock_response = self.get_fake_response(headers=headers)\n\n timeout = cache_backend.get_timeout_from_header(mock_response)\n assert timeout == 2592000\n\n def test_get_timeout_from_header_no_cache(self):\n cache_backend = self.get_backend()\n headers = {\n 'cache-control': 'no-cache'\n }\n mock_response = self.get_fake_response(headers=headers)\n\n timeout = cache_backend.get_timeout_from_header(mock_response)\n assert timeout is None\n\n def test_get_timeout(self):\n cache_backend = self.get_backend()\n mock_response = self.get_fake_response()\n timeout = cache_backend.get_timeout(mock_response)\n assert timeout == DEFAULT_TIMEOUT\n\n cache_backend = self.get_backend(default_timeout=42)\n timeout = cache_backend.get_timeout(mock_response)\n assert timeout == 42\n\n headers = {\n 'cache-control': 'public, max-age=2592000'\n }\n mock_response = self.get_fake_response(headers=headers)\n timeout = cache_backend.get_timeout(mock_response)\n assert timeout == 2592000\n\n cache_backend = self.get_backend(\n default_timeout=42,\n obey_cache_headers=False\n )\n timeout = cache_backend.get_timeout(mock_response)\n assert timeout == 42\n\n\nclass TestDjangoCacheBackend(TestBaseCacheBackend):\n def get_backend(self, **kwargs):\n defaults = {\n 'default_timeout': DEFAULT_TIMEOUT,\n 'obey_cache_headers': True,\n }\n defaults.update(kwargs)\n\n return django_cache.DjangoCacheBackend(**defaults)\n\n def test_get(self):\n backend = self.get_backend()\n res = mock.Mock()\n res.url = 'naprulez.org'\n\n with mock.patch('django.core.cache.cache.get') as dj_cache_get:\n dj_cache_get.return_value = 'a thing'\n backend.get(res)\n assert dj_cache_get.called\n\n def test_set(self):\n backend = self.get_backend()\n res = mock.Mock()\n res.url = 'naprulez.org'\n res.headers = {}\n\n with mock.patch('django.core.cache.cache.set') as dj_cache_set:\n backend.set(res, res.value)\n assert dj_cache_set.called\n\n\nclass TestFlaskCacheBackend(TestBaseCacheBackend):\n def get_backend(self, **kwargs):\n defaults = {\n 'default_timeout': 
DEFAULT_TIMEOUT,\n 'obey_cache_headers': True,\n 'config': {\n 'CACHE_TYPE': 'simple',\n 'CACHE_KEY_PREFIX': '',\n }\n }\n app = Flask(__name__, static_url_path='/store/static')\n defaults.update(kwargs)\n\n return flask_cache.FlaskCacheBackend(app, **defaults)\n\n def test_get(self):\n backend = self.get_backend()\n res = mock.Mock()\n res.url = 'naprulez.org'\n\n with mock.patch('flask_caching.Cache.get') as fl_cache_get:\n fl_cache_get.return_value = 'a thing'\n backend.get(res)\n assert fl_cache_get.called\n\n def test_set(self):\n backend = self.get_backend()\n res = mock.Mock()\n res.url = 'naprulez.org'\n res.headers = {}\n\n with mock.patch('flask_caching.Cache.set') as fl_cache_set:\n backend.set(res, res.value)\n assert fl_cache_set.called\n\n\nclass TestCaching(unittest.TestCase):\n def setUp(self):\n self.the_cache = SampleCacheableResource._meta['cache_backend']\n # NOTE: there is a single instance of the in memory (fake) cache for\n # all instances of our model\n self.the_cache.clear()\n\n @mock.patch('requests.request')\n def test_get_response_from_filter_is_cached(self, mock_request):\n \"\"\"Test that filter() responses can be cached.\n NOTE: nap only supports GETs on filter() calls. Weird\"\"\"\n\n # create mock request nap is going to issue and a mock response that\n # nap will get back\n r = mock.Mock()\n r.status_code = 200\n r.content = json.dumps([\n {'title': 'hello1', 'content': 'content1'},\n {'title': 'hello2', 'content': 'content2'}\n ])\n mock_request.return_value = r\n\n # Make a request with caching DISABLED and verify nothing was cached\n obj = SampleCacheableResource.objects.filter(skip_cache=True)\n assert len(obj) == 2\n assert len(self.the_cache.get_cached_data()) == 0\n\n # Repeat this time with caching ENABLED and verify the request /\n # response was cached\n obj = SampleCacheableResource.objects.filter(skip_cache=False)\n assert len(obj) == 2\n assert len(self.the_cache.get_cached_data()) == 1\n\n # Repeat a final time, this time we should get the object from cache\n # w/o making another network request\n mock_request.return_value = None\n mock_request.side_effect = Exception(\n 'We\\'re making a network request when we should be using the '\n 'cached data'\n )\n SampleCacheableResource.objects.filter(skip_cache=False)\n assert obj is not None\n assert len(self.the_cache.get_cached_data()) == 1\n\n @mock.patch('requests.request')\n def test_get_response_from_lookup_is_cached(self, mock_request):\n \"\"\"Test that lookup() responses can be cached.\"\"\"\n\n # create mock request nap is going to issue and a mock response that\n # nap will get back\n r = mock.Mock()\n r.status_code = 200\n r.content = json.dumps({'title': 'hello1', 'content': 'content1'})\n mock_request.return_value = r\n\n # Make a request with caching DISABLED and verify nothing was cached\n obj = SampleCacheableResource.objects.lookup(skip_cache=True)\n assert obj is not None\n\n # NOTE: unlike filter() lookup() always puts the result in the cache.\n # filter() only stores values in the cache\n # if skip_cache is False\n assert len(self.the_cache.get_cached_data()) == 1\n\n # Repeat this time with caching ENABLED and verify the request /\n # response was cached\n obj = SampleCacheableResource.objects.lookup(skip_cache=False)\n assert obj is not None\n assert len(self.the_cache.get_cached_data()) == 1\n\n # Repeat a final time, this time we should get the object from cache\n # w/o making another network request\n mock_request.return_value = None\n mock_request.side_effect = 
Exception(\n 'We\\'re making a network request when we should be using the '\n 'cached data'\n )\n obj = SampleCacheableResource.objects.lookup(skip_cache=False)\n assert obj is not None\n assert len(self.the_cache.get_cached_data()) == 1\n","sub_path":"tests/test_cache.py","file_name":"test_cache.py","file_ext":"py","file_size_in_byte":10301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"502649978","text":"from django.db import models\nfrom django.conf import settings\n# Create your models here.\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=100)\n\n\nclass Video(models.Model):\n ''' A video model that stores information about uploaded videos'''\n related_name = 'videos'\n related_query_name = 'video'\n\n video_id = models.CharField(max_length=11)\n title = models.CharField(max_length=2000, db_index=True, verbose_name='Title')\n description = models.TextField(null=True, blank=True, verbose_name='Description')\n url = models.URLField(verbose_name='URL field')\n youtube_tags = models.ManyToManyField(Tag, related_name=related_name, related_query_name=related_query_name)\n thumbnail = models.URLField(verbose_name='thumbnail image')\n duration = models.DurationField()\n view_count = models.IntegerField(default=0)\n like_count = models.IntegerField(default=0)\n dislike_count = models.IntegerField(default=0)\n comment_count = models.IntegerField(default=0)\n\n\nclass Course(models.Model):\n ''' Model showing collection of videos '''\n related_name = 'courses'\n related_query_name = 'course'\n\n videos = models.ManyToManyField(Video, related_name=related_name, related_query_name=related_query_name)\n creator = models.ForeignKey(settings.AUTH_USER_MODEL, related_name=related_name, related_query_name=related_query_name)\n course_title = models.CharField(max_length=500, db_index=True, verbose_name='Title of the course')\n course_description = models.TextField(null=True, blank=True, verbose_name='Description of the course')\n prerequisite = models.CharField(max_length=100)\n course_tags = models.ManyToManyField(Tag, related_name=related_name, related_query_name=related_query_name)\n course_image = models.URLField(null=True, blank=True)\n view_count = models.IntegerField(default=0)\n # class Meta:\n # unique_together\n\n\nclass Rating(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL)\n course = models.ForeignKey(Course)\n rating = models.IntegerField(default=0)\n\n\nclass CourseHistory(models.Model):\n user = models.ForeignKey(settings.AUTH_USER_MODEL)\n course = models.ForeignKey(Course)\n history_datetime = models.DateTimeField(auto_now_add=True)\n","sub_path":"course_management/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"613273151","text":"import numpy as np\nfrom scipy.fftpack import ifft\nimport scipy.linalg\nimport scipy.io\nimport scipy.signal\nfrom matplotlib import pyplot as plt\n\n\ndef dft_matrix(N):\n return scipy.linalg.dft(N)\n\n\ndef dft_matrix_2d(N):\n x = scipy.linalg.dft(N)\n return np.kron(x, x)\n\n\ndef hilbert_rotation(x, axis=-1):\n N = x.shape[axis]\n h = np.zeros(N)\n if N % 2 == 0:\n h[0] = h[N // 2] = 1\n h[1:N // 2] = 2\n else:\n h[0] = 1\n h[1:(N + 1) // 2] = 2\n if x.ndim > 1:\n ind = [np.newaxis] * x.ndim\n ind[axis] = slice(None)\n h = h[tuple(ind)]\n transformed = x * h\n return transformed\n\ndef find_peak(data):\n idx = 
np.unravel_index(np.argmax(data, axis=None), data.shape)\n return idx, data[idx]\n\ndef peak_frequency(x, channels=None, dftmatrix=None, idftmatrix=None):\n N = x.shape[0]\n x = np.asarray(x)\n if channels is None:\n channels = {'delta': (1, 4), 'theta': (4, 8), 'alpha': (8, 12), 'beta': (12, 30), 'gamma': (30, 45)}\n if np.iscomplexobj(x):\n raise ValueError(\"x is not a real signal.\")\n if dftmatrix is None:\n dftmatrix = dft_matrix(N)\n idftmatrix = np.linalg.inv(dftmatrix)\n xdft = dftmatrix.dot(x)\n H = hilbert_rotation(xdft)\n Z = dict()\n for channel in channels:\n signal = np.zeros(N, dtype=complex)\n signal[channels[channel][0]:channels[channel][1]] = H[channels[channel][0]:channels[channel][1]]\n\n Z[channel] = signal.dot(idftmatrix)\n\n plt.plot(np.imag(Z[channel]), label=\"imag\")\n plt.plot(np.real(Z[channel]), label=\"real\")\n plt.plot(np.abs(Z[channel]),label=\"absolute\")\n plt.legend()\n plt.show()\n return Z\n\n\nif __name__ == \"__main__\":\n MAT = scipy.io.loadmat('motor-imagery-eeg.mat')\n dict_keys = [*MAT.keys()]\n X = MAT[dict_keys[3]]\n data = X[0, :, 0]\n fs = 500\n plt.plot(data[:1000], label=\"data\")\n plt.legend()\n plt.show()\n #peak_frequency(data[:1000],{'all': (1, 45)})\n analytic_signal = scipy.signal.hilbert(data)\n amplitude_envelope = np.abs(analytic_signal)\n plt.plot(data[:100],label=\"data\")\n plt.plot(amplitude_envelope[:100],label=\"envelope\")\n plt.legend()\n plt.show()\n","sub_path":"signalprocessing/peakfrequency/old_scripts/hilbert.py","file_name":"hilbert.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"217131062","text":"import logging\nimport sqlite3\n\nfrom requests import HTTPError\n\nfrom online_scrapers import all_scrapers\n\n\ndef get_text(link):\n for scraper in scrapers:\n if scraper.can_scrape(link):\n return scraper.scrape_text(link)\n logging.error(f\"No scraper available for {link}\")\n return None\n\n\ndef get_articles(conn, where=\"status is null\", n=100):\n cur = conn.cursor()\n cur.execute(f\"SELECT link as url, title, medium as publisher, date FROM articles where {where} limit {n}\")\n colnames = [x[0] for x in cur.description]\n rows = cur.fetchall()\n return [dict(zip(colnames, r)) for r in rows]\n\n\nclass SkipArticle(Exception):\n pass\n\ndef scrape_article(link):\n if ('video' in link) or ('redirect' in link) or ('Liveblog' in article['title']):\n raise SkipArticle(\"Video/redirect/liveblog\")\n try:\n text = get_text(link)\n except HTTPError as err:\n if (err.response.status_code == 404) or (err.response.status_code == 403) or (err.response.status_code == 410):\n logging.error(f\"Article not found (404, 403): {link}\")\n raise SkipArticle(\"404\")\n else:\n raise\n if not text:\n raise SkipArticle(\"Empty\")\n return text\n\n\ndef set_status(conn, articles, status='done'):\n urls = \",\".join(f\"'{a['url']}'\" for a in articles)\n with conn:\n cur = conn.cursor()\n cur.execute(f\"Update articles set status = '{status}' where link in ({urls})\")\n\n\nlogging.basicConfig(level=logging.INFO, format='[%(asctime)s %(name)-12s %(levelname)-5s] %(message)s')\n\nfrom amcatclient import AmcatAPI\nc = AmcatAPI(\"http://localhost:8000\")\nscrapers = all_scrapers()\n\ndb = \"coosto.db\"\nconn = sqlite3.connect(db)\nproject = 1\narticleset = 129\n\nwhile True:\n logging.info(\"Retrieving articles to scrape from database\")\n articles = get_articles(conn)\n if not articles:\n break\n to_save = []\n to_skip = []\n\n for i, 
article in enumerate(articles):\n        logging.info(f\"[{i + 1}/{len(articles)}] Scraping article {article['url']}\")\n        try:\n            print(article['url'])\n            article['text'] = scrape_article(article['url'])\n            to_save.append(article)\n        except SkipArticle:\n            to_skip.append(article)\n\n    logging.info(f\"Saving {len(to_save)} articles, skipped {len(to_skip)}\")\n\n    c.create_articles(project, articleset, to_save)\n    set_status(conn, to_save, status='done')\n    set_status(conn, to_skip, status='skip')\n\nlogging.info(\"DONE\")\n","sub_path":"rss_scrapers.py","file_name":"rss_scrapers.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"448384470","text":"# -*- coding: utf-8 -*-\n\n# STEP 4\n# Trim each set: drop sets whose sequences exceed MAX_SEQ_LEN, pad shorter ones with EOS and PAD\n\nimport sys\nimport os\nimport re\n\ncur_file_no = 0\ncur_del_no = 0\n\nMAX_SEQ_LEN = 20\nEOS = '40998'\nPAD = '40999'\n\n# Set the input directory\ninput_path = r'/Users/xuzhe/Other/demoinput'\n\n# Set the output directory\noutput_path = r'/Users/xuzhe/Other/demoout'\n\nfor root, dirs, files in os.walk(input_path):\n    for file_name in files:\n        file_path = root + \"/\" + file_name\n\n        # skip the annoying macOS metadata file\n        if '.DS_Store' == file_name:\n            continue\n\n        f = open(file_path,'r')\n        data = f.read()\n        f.close()\n        if f :\n            try:\n                sets = data.split('\\n#####\\n')\n                new_sets = []\n                for set in sets:\n                    seqs = set.split('\\n')\n                    new_seqs = []\n\n                    is_ignore = 0\n\n                    for seq in seqs:\n                        words = seq.split(' ')\n                        seq_len = len(words)\n                        if seq_len >= MAX_SEQ_LEN :\n                            cur_del_no += 1\n                            is_ignore = 1\n                            break\n                        else:\n                            pad_num = MAX_SEQ_LEN - seq_len - 1\n                            words.append(EOS)\n                            [words.append(PAD) for _ in range(pad_num)]\n\n                        new_seqs.append(' '.join(words))\n\n                    if is_ignore < 1:\n                        new_sets.append('\\n'.join(new_seqs))\n\n                fw = open(output_path+ \"/\" + file_name, 'w')\n                fw.write('\\n#####\\n'.join(new_sets))\n                fw.close()\n\n\n            except:\n                print (\"except: %s\" %(file_path))\n\n        cur_file_no += 1\n        print(\"[%d] completed del set %d \" %( cur_file_no, cur_del_no))\n","sub_path":"Hermit/cron/data2set/triple/trim.py","file_name":"trim.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"95051967","text":"# Restrict the attributes of an instance\n# Only allow binding the name and age attributes on Student instances\n# Define the special __slots__ variable to limit which attributes instances of the class can bind\n# Since 'score' is not listed in __slots__, the score attribute cannot be bound; trying to bind it raises an AttributeError.\n# Note that __slots__ only applies to instances of the current class; it has no effect on instances of subclasses:\nclass Student(object):\n    __slots__ = ('name', 'age')\nclass GraduateStudent(Student):\n    __slots__ = ('glass')\n\nif __name__=='__main__':\n    s = Student()\n    s.name = 'm'\n    s.age = 15\n    print(s.name,s.age)\n    #.score = 90\n    g = GraduateStudent()\n    # g.score = 90\n    # print(g.score)\n    g1 = GraduateStudent()\n    g1.glass = '1'\n    g.score = 90","sub_path":"p_learn/p__slots__.py","file_name":"p__slots__.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"599055201","text":"\"\"\"\nThe Random Walker \n--------------------------------------------------------------------------------\nAuthor: Griffin Chure\nLast Modified: September 23, 2019\nLicense: MIT\n\nDescription\n--------------------------------------------------------------------------------\nThis file generates a standalone HTML file of a random walker with 100,000 steps\nin two dimensions. 
The left plot shows the overview of the entire random walk\nwhile the right plot shows the current position and the last 5000 steps of the\nwalker.\n\n\"\"\"\nimport numpy as np\nimport bokeh.io\nimport bokeh.plotting\nimport bokeh.layouts\nimport bokeh.palettes\nfrom bokeh.models import *\nbokeh.io.output_file('random_walk.html')\ncolors = {'black':'#444147', 'purple': '#7E59A2', 'orange':'#E39943'} \n\n# Define the initial number of steps\nn_steps = int(1E5)\n\n# Assemble the three necessary data sources\nsource = ColumnDataSource({'x':[], 'y':[]})\ncurrent_position = ColumnDataSource({'x':[], 'y':[]})\ndisplay_source = ColumnDataSource({'x':[], 'y':[]})\n\n# Define the interactive components\nrun_button = Button(label='click to generate a random walk')\nstep_slider = Slider(title='number of steps', start=10, end=int(n_steps), step=1,\n                     value=10, bar_color=colors['purple'])\n\n# Define the javascript for each component\ngenerate_walk = \"\"\"\n\n// Instantiate position vectors\nvar xs = [0]\nvar ys = [0]\n\n// Loop through each step, starting with the second.\nfor (var i = 1; i < n_steps; i++) { \n\n    // Determine a random angle to step towards\n    theta = Math.random() * 2 * Math.PI;\n\n    // Compute and store the new xy positions\n    xs.push(xs[i - 1] + Math.cos(theta)); \n    ys.push(ys[i - 1] + Math.sin(theta)); }\n\n\n// Update the data source\nsource.data['x'] = xs;\nsource.data['y'] = ys;\nsource.change.emit()\n\n// Reset the step slider to the default value\nstepSlider.value = 10;\n\"\"\"\n\ndisplay_steps = \"\"\"\n// Determine where to start slicing the inset data\nif (stepSlider.value < 5000) {\n    var init = 0;\n}\nelse { \n    var init = stepSlider.value - 5000;\n}\n\n// Assign the inset data for display\ndisplay_source.data['x'] = source.data['x'].slice(init, stepSlider.value);\ndisplay_source.data['y'] = source.data['y'].slice(init, stepSlider.value);\n\n// Highlight the current position -- Can do this with an indexfilter as well\ncurrent_position.data['x'] = display_source.data['x'].slice(-1);\ncurrent_position.data['y'] = display_source.data['y'].slice(-1);\n\n// Update the data sources\ndisplay_source.change.emit()\ncurrent_position.change.emit()\n\"\"\"\n\n\n# Define and assign the callbacks\nargs={'stepSlider':step_slider, 'source':source,\n'display_source':display_source, 'current_position':current_position,\n'n_steps':n_steps}\ngenerate_cb = CustomJS(args=args, code=generate_walk + display_steps)\ndisplay_cb = CustomJS(args=args, code=display_steps)\nrun_button.js_on_click(generate_cb)\nstep_slider.js_on_change('value', display_cb)\n\n# Define the axes\noverview_ax = bokeh.plotting.figure(width=300, height=300, match_aspect=True,\n                                    x_axis_label = 'x position', y_axis_label='y position')\ninset_ax = bokeh.plotting.figure(width=300, height=300, match_aspect=True,\n                                 x_axis_label='x position', y_axis_label = 'y position')\n\n# Populate the axes\noverview_ax.circle('x', 'y', source=current_position, color=colors['orange'],\nlevel='overlay', size=5)\noverview_ax.square('x', 'y', source=current_position, fill_color='grey', fill_alpha=0.25,\nline_color=colors['orange'], level='overlay', size=30)\noverview_ax.line('x', 'y', source=source, color=colors['black'], line_width=0.5,\nalpha=0.5)\n\ninset_ax.line('x', 'y', source=display_source, color=colors['purple'], line_width=0.5, alpha=1)\ninset_ax.circle('x', 'y', source=current_position, color=colors['orange'],\nlevel='overlay', size=5)\n\n\n# Define the layout\nrow = bokeh.layouts.row(overview_ax, inset_ax)\nlay = 
bokeh.layouts.column(run_button, step_slider, row)\n\n\n# Define the theme and save\ntheme_json = {\n    'attrs' : {\n        'Figure' : {\n            'background_fill_color': '#EEEEEE',\n        },\n        'Axis': {\n            'axis_line_color': 'slategray',\n            'major_tick_line_color': None,\n            'minor_tick_line_color': None,\n        },\n        'Legend': {\n            'border_line_color': 'slategray',\n            'background_fill_color': '#EEEEEE',\n            'border_line_width': 0.75,\n            'background_fill_alpha': 0.75,\n        },\n        'Grid': {\n            'grid_line_color': '#FFFFFF',\n            'grid_line_width': 0.75,\n        },\n        'Text': {\n            'text_font_style': 'italic',\n            'text_font': 'Arial', \n            'text_font_size':10,\n        },\n        'Title': {\n            'text_color': '#3c3c3c',\n            'align': 'left',\n            'text_font': 'Arial',\n            'text_font_style': 'italic',\n            'offset': 2,\n        }\n    }\n}\n \ntheme = bokeh.themes.Theme(json=theme_json)\nbokeh.io.curdoc().theme = theme\nbokeh.io.save(lay)","sub_path":"assets/code/random_walker.py","file_name":"random_walker.py","file_ext":"py","file_size_in_byte":4952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"408399100","text":"import h5py\nimport lnPi\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef get_spin_bin(ref,mu_in,x,spin_kwargs=None,bin_kwargs=None):\n    \"\"\"\n    from reference lnpi_phase, create collection and calculate spinodals and binodals\n    \n    Parameters\n    ----------\n    ref : lnPi_phases\n\n    mu_in : mu iterator generator\n\n    x : variable mu range\n\n    spin_kwargs,bin_kwargs : extra arguments to get_spinodals, get_binodals\n\n    Returns\n    -------\n    C : lnPi_collection with spinodals/binodals\n    \"\"\"\n\n    if spin_kwargs is None: spin_kwargs={}\n    if bin_kwargs is None: bin_kwargs = {}\n    \n    #initial estimate\n    C = lnPi.lnPi_collection.from_mu(ref,mu_in,x)\n    \n    C.get_spinodals(**spin_kwargs)\n    C.get_binodals(**bin_kwargs)\n    \n    return C\n\n\n\n\n\n\n################################################################################\n#read/write list of collections\n################################################################################\ndef lnPi_collections_list_to_hdf(ref,lst,path_or_buff,key=None,overwrite=True):\n    \"\"\"\n    write list of lnPi_collection(s) to h5py file\n    \"\"\"\n    if isinstance(path_or_buff,(bytes,str)):\n        p = h5py.File(path_or_buff)\n\n    elif isinstance(path_or_buff,(h5py.File,h5py.Group)):\n        p = path_or_buff\n    else:\n        raise ValueError('bad path_or_buff type %s'%(type(path_or_buff)))\n\n\n    if key is None:\n        group = p\n    else:\n        if key in p:\n            if overwrite:\n                del p[key]\n            else:\n                raise RuntimeError('key %s already exists'%key)\n        group = p.create_group(key)\n    \n\n    if ref is not None: \n        ref.to_hdf(group,'lnpi_ref',overwrite=overwrite)\n    \n    collection_list = []\n    for i,x in enumerate(lst):\n        key = 'collection_%i'%i\n        collection_list.append(key)\n        x.to_hdf(group,key,ref=None,overwrite=overwrite)\n\n    group.create_dataset('collection_list',data=np.array(collection_list))\n\n\ndef lnPi_collections_list_from_hdf(path_or_buff,key=None,ref=None,collection_key='collection_list',collection_list=None):\n    \"\"\"\n    read list of lnPi_collection(s) from h5py file\n    \"\"\"\n\n    if isinstance(path_or_buff,(bytes,str)):\n        group = h5py.File(path_or_buff)\n\n    elif isinstance(path_or_buff,(h5py.File,h5py.Group)):\n        group = path_or_buff\n\n    else:\n        raise ValueError('bad path_or_buff')\n\n\n    if key is not None:\n        group = group[key]\n\n\n    if ref is None:\n        ref = lnPi.lnPi_phases.from_hdf(group,'lnpi_ref')\n\n    lst = []\n\n    if collection_list is not None:\n        itr = collection_list\n    elif collection_key is not None:\n        itr = 
group[collection_key]\n else:\n raise ValueError('need either collection_key or collection_list')\n \n for k in itr:\n lst.append(lnPi.lnPi_collection.from_hdf(group,k,ref=ref))\n\n\n return ref,lst\n\n\n################################################################################\n#DataFrames\n################################################################################ \ndef get_spinodal_data(lst,merge=False,**kwargs):\n \"\"\"\n get Dataframe of spinodal data\n \"\"\"\n l = []\n for x in lst:\n for phaseID,s in enumerate(x.spinodals):\n if s is not None:\n if merge:\n s = s.merge_phases(**kwargs)\n \n d = dict(phaseID=phaseID)\n\n for comp,mu in enumerate(s.mu):\n d['mu_%i'%comp] = mu\n\n for comp,mf in enumerate(s.molfracs_phaseIDs):\n d['molfrac_%i'%comp] = s.molfracs_phaseIDs[phaseID,comp]\n \n d['omega'] = s.Omegas_phaseIDs()[phaseID]\n l.append(d)\n\n return pd.DataFrame(l)\n\n\ndef get_binodal_data(lst,merge=False,**kwargs):\n \"\"\"\n get DataFrame of binodal data\n \"\"\"\n l = []\n for x in lst:\n for ID,b in enumerate(x.binodals):\n if b is not None:\n if merge:\n b = b.merge_phases(**kwargs)\n\n d = dict(binodalID = ID)\n for comp,mu in enumerate(b.mu):\n d['mu_%i'%comp] = mu\n\n for phaseID in range(b.base.num_phases_max):\n dd = d.copy()\n dd['phaseID'] = phaseID\n for comp,mf in enumerate(b.molfracs_phaseIDs):\n dd['molfrac_%i'%comp] = b.molfracs_phaseIDs[phaseID,comp]\n dd['omega'] = b.Omegas_phaseIDs()[phaseID]\n l.append(dd)\n\n return pd.DataFrame(l)\n\n\n\n################################################################################\n#plotting\n################################################################################\ndef plot_omega_vs_molfrac(bino,spin,ls=['-','--'],colors=['k','k'],ax_labels=False,ax=None):\n \"\"\"\n Note: linestyle -> [bin,spin], color -> phaseID\n \"\"\"\n if ax is None:\n fig,ax = plt.subplots()\n \n if ax_labels:\n ax.set_xlabel(r'$x_0$')\n ax.set_ylabel(r'$-\\Omega$')\n \n for phaseID,g in bino.groupby('phaseID'):\n ax.plot(g.molfrac_0,-g.omega,color=colors[phaseID],ls=ls[0],label=phaseID)\n \n for phaseID,g in spin.groupby('phaseID'):\n ax.plot(g.molfrac_0,-g.omega,color=colors[phaseID],ls=ls[1])\n return ax\n\n\ndef plot_mu0_vs_mu1(bino,spin,ls=['-','--'],colors=['b','r','g'],ax_labels=False,ax=None,\n line_labels=['binodal','spin. 0','spin. 
1']):\n \"\"\"\n Note: linestyle -> [bin,spin], color -> bin, spin0,spin1\n \"\"\"\n if ax is None:\n fig,ax = plt.subplots()\n \n if ax_labels:\n ax.set_xlabel(r'$\\mu_0$')\n ax.set_ylabel(r'$\\mu_1$')\n \n g = bino.query('phaseID==0')\n ax.plot(g.mu_0,g.mu_1,label=line_labels[0],ls=ls[0],color=colors[0])\n\n for i,(phaseID,g) in enumerate(spin.groupby('phaseID')):\n ax.plot(g.mu_0.values,g.mu_1.values,label=line_labels[1+i],ls=ls[1],color=colors[1+i])\n return ax\n","sub_path":"examples/2D/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"230893597","text":"import numpy as np\n\ndef distance(x1_y1,x2_y2):\n \"\"\"Calculates distance between two coordinates\n\n Arguments:\n (x1,y1) -- First x,y coordinate as tuple\n (x2,y2) -- Second x,y coordinate as tuple\n\n Returns:\n distance\n\n \"\"\"\n x1, y1 = x1_y1\n x2, y2 = x2_y2\n distance = np.sqrt((x1-x2)**2+(y1-y2)**2)\n return distance\n\ndef find_clusters(coords,threshold=0.03,indices=None):\n \"\"\"Finds clusters in list of coordinates\n\n Keyword arguments:\n coords -- python list of tuples for peak coordinates. e.g. [(x1,y1),(x2,y2),(x3,y3)] \n threshold -- distance within which peaks are considered clustered\n indices -- list of coordinate names or indices\n\n Returns:\n clusters -- list of clustered coordinates [[(index, (x, y)), (index, (x, y))],[etc]]\n\n \"\"\"\n\n if indices is None:\n indices = range(len(coords))\n\n elif len(indices) != len(coords):\n raise ValueError(\"Length of indices must be the same as the number of coordinates\")\n\n coords = zip(indices,coords)\n clusters = []\n cluster_indices = []\n while coords:\n clu = []\n sear = []\n\n t = coords.pop(-1)\n\n sear.append(t)\n clu.append(t)\n\n while sear:\n\n t2 = sear.pop(-1)\n\n for j in coords:\n if distance(t2[1],j[1]) 0:\n\t\tstr += '\\x00'\n\t\tr -= 1\n\tstr = str[::-1]\n\treturn str\n\ndef convert(in_dir, size, out_imagef, out_labelf):\n\tout_i = open(out_imagef, 'w')\n\tout_l = open(out_labelf, 'w')\n\n\t#write the headers of image and label files\n\tstr_img = '\\x00\\x00\\x08\\x03' + tohex(size) + tohex(40) + tohex(40)\n\tstr_lab = '\\x00\\x00\\x08\\x01' + tohex(size)\n\tfor i in range(1,33):\n\t\twith open(in_dir+str(i)+'.mat') as f:\n\t\t\td = sim.loadmat(f)\n\t\t\tdata = d['affNISTdata']\n\t\t\tfor x in np.nditer(data[0][0][2]):\n\t\t\t\tstr_img += chr(x)\n\t\t\tfor x in np.nditer(data[0][0][5]):\n\t\t\t\tstr_lab += chr(x)\n\t\t\tf.close()\n\tout_i.write(str_img)\n\tout_l.write(str_lab)\n\tout_i.close()\n\tout_l.close()\n\nconvert('training_and_validation_batches/', 60000*32, 'train_images_ubyte', 'train_labels_ubyte')\nconvert('test_batches/', 10000*32, 'test_images_ubyte', 'test_labels_ubyte')\n","sub_path":"aff2mnist.py","file_name":"aff2mnist.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"296563882","text":"from models.item import Item\nfrom models.alert import Alert\n\nipad = Item(\n \"https://www.johnlewis.com/2018-apple-ipad-pro-12-9-inch-a12x-bionic-ios-wi-fi-cellular-512gb/space-grey/p3834614\",\n \"p\",\n {\"class\": \"price price--large\"}\n)\n\nipad.save_to_mongo()\n\nalert = Alert(ipad._id, 
1000)\nalert.save_to_mongo()","sub_path":"07_notify_when_price_reached/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"348368735","text":"from loguru import logger\nfrom .profile_base import ProfileBase\n\nclass Tinder(ProfileBase):\n\n source_name = 'tinder'\n package_name = '\\'com.tinder/.activities.MainActivity\\''\n crops = {\n 'main_image': (\n 0.026388889, \n 0.72,\n ),\n 'raw': (\n 0.2, \n 0.916666667,\n ),\n 'stop_check': (\n 0.2, \n 0.0, \n 0.8, \n 0.3,\n ),\n 'is_ad': (\n 0.305555556, \n 0.78125, \n 0.694444444, \n 0.8671875,\n ),\n }\n initial_tap_coords = (\n 0.25, \n 0.84537037, \n )\n reset_tap_coords = (\n 0.694444444, \n 0.951851852, \n )\n bio_swipe_coords = {\n 'from': 0.78125,\n 'to': 0.6,\n 'duration': None\n }\n remove_these = ['SEE WHAT A FRIEND THINKS', ]\n stop_condition_check_texts = [\n 'Get Tinder Plus',\n ]\n tmp_image_path = None\n\n \n # def is_stop_condition(self):\n # return False\n \n def process_stop_condition(self):\n pass\n\n \n # def collect_data(self):\n\n \n # # crop to dimensions\n # pass\n\n # def reset(self):\n # pass","sub_path":"sources/tinder.py","file_name":"tinder.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"396383921","text":"'''\r\nCreated on 09.09.2017\r\n\r\n@author: Tibu\r\n'''\r\n\r\nimport numpy\r\nfrom de.mathis.InvalidMoveException import InvalidMoveException\r\n\r\n\r\nboard = numpy.array([[-1, -1, 1, 1, 1, -1, -1]\r\n , [-1, -1, 1, 1, 1, -1, -1]\r\n , [1, 1, 1, 1, 1, 1, 1]\r\n , [1, 1, 1, 0, 1, 1, 1]\r\n , [1, 1, 1, 1, 1, 1, 1]\r\n , [-1, -1, 1, 1, 1, -1, -1]\r\n , [-1, -1, 1, 1, 1, -1, -1]]\r\n , numpy.int32)\r\n\r\nlabels = {-1:' ', 0:'\\u00B7', 1:'x'}\r\n\r\nclass SolitaireBoard():\r\n \r\n def _getSymbolForValue(self, value):\r\n return labels[value]\r\n \r\n def printBoard(self):\r\n print('+' + '-' * 15 + '+')\r\n for row in range(len(board)):\r\n print('| ' + ' '.join(self._getSymbolForValue(board[row][p]) for p in range(len(board[row]))) + ' |') \r\n print('+' + '-' * 15 + '+')\r\n \r\n def printBoardAndIndices(self):\r\n print('+' + '-' * 15 + '+' + ' ' + '+' + '-' * 22 + '+') \r\n for row in range(len(board)):\r\n rowLen=len(board[row])\r\n print('| ' + ' '.join(self._getSymbolForValue(board[row][cell]) for cell in range(rowLen)) + ' |' \r\n + ' '\r\n + '| ' + ' '.join('{:>2}'.format( ' ' if self._getValueOfField(row*rowLen+cell) < 0 else str(row*rowLen+cell)) for cell in range(rowLen)) + ' |')\r\n print('+' + '-' * 15 + '+' + ' ' + '+' + '-' * 22 + '+') \r\n \r\n def printIndices(self):\r\n print('+' + '-' * 22 + '+')\r\n for i in range(7):\r\n print('| ' + ' '.join('{:>2}'.format(str(i*7+j)) for j in range(7)) + ' |') \r\n print('+' + '-' * 22 + '+')\r\n\r\n def _indexToRow(self, index):\r\n row = int(index / 7) \r\n return row\r\n \r\n def _indexToCell(self, index):\r\n cell = int(index % 7)\r\n return cell\r\n\r\n def _getValueOfField(self, index):\r\n return board[self._indexToRow(index)][self._indexToCell(index)]\r\n\r\n def getField(self, index):\r\n return self._getSymbolForValue(self._getValueOfField(index))\r\n\r\n def moveAndPrint(self,current, target):\r\n self.move(current, target)\r\n self.printBoardAndIndices()\r\n \r\n def move(self, current, target):\r\n print('Moving {} to {}...'.format(current, target))\r\n \r\n rowA = self._indexToRow(current)\r\n rowB = 
self._indexToRow(target)\r\n colA = self._indexToCell(current)\r\n colB = self._indexToCell(target)\r\n\r\n if rowA != rowB and colA != colB:\r\n raise InvalidMoveException(\"Cant move diagonal (from {} to {})\".format(current, target))\r\n \r\n horizontalMove = rowA == rowB\r\n \r\n distance = abs(colA- colB) if horizontalMove else abs(rowA- rowB)\r\n currentField = board[rowA][colA]\r\n targetField = board[rowB][colB]\r\n \r\n betweenIndex = -1\r\n \r\n if horizontalMove:\r\n betweenIndex = current - 1 if colA > colB else target - 1\r\n else:\r\n betweenIndex = current - 7 if rowA > rowB else target - 7\r\n \r\n betweenField = self._getValueOfField(betweenIndex)\r\n\r\n if distance != 2 :\r\n raise InvalidMoveException('illegal move: from {} to {}'.format(current, target))\r\n if currentField != 1 :\r\n raise InvalidMoveException('no stone at current {} (found {} instead)'.format(current, self._getSymbolForValue(currentField)))\r\n if(targetField != 0):\r\n raise InvalidMoveException('no empty field at targetField {} (found {} instead)'.format(target, self._getSymbolForValue(targetField)))\r\n if(betweenField != 1):\r\n raise InvalidMoveException('cant jump over {}'.format(self._getSymbolForValue(betweenField)))\r\n \r\n board[self._indexToRow(betweenIndex)][self._indexToCell(betweenIndex)] = 0\r\n board[rowB][colB] = 1\r\n board[rowA][colA] = 0\r\n \r\n ","sub_path":"Solitaire/de/mathis/SolitaireBoard.py","file_name":"SolitaireBoard.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"219788398","text":"\"\"\"empty message\n\nRevision ID: 0c35ecf674d0\nRevises: 9360db754e94\nCreate Date: 2016-04-30 15:52:30.053974\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '0c35ecf674d0'\ndown_revision = '9360db754e94'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('user')\n op.drop_constraint('courses_fkey', 'courses', type_='foreignkey')\n op.create_foreign_key(None, 'courses', 'departments', ['department'], ['department'], onupdate='CASCADE', ondelete='CASCADE')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_constraint(None, 'courses', type_='foreignkey')\n    op.create_foreign_key('courses_fkey', 'courses', 'departments', ['department'], ['department'])\n    op.create_table('user',\n    sa.Column('id', sa.INTEGER(), nullable=False),\n    sa.Column('name', sa.VARCHAR(length=128), autoincrement=False, nullable=True),\n    sa.PrimaryKeyConstraint('id', name='user_pkey')\n    )\n    ### end Alembic commands ###\n","sub_path":"migrations/versions/0c35ecf674d0_.py","file_name":"0c35ecf674d0_.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"18213448","text":"# -*- coding:utf-8 -*-\nclass Solution:\n    def NumberOf1Between1AndN_Solution(self, n):\n        # write code here\n        # Count how many times the digit 1 appears in the integers from 1 to 13, and then in the integers from 100 to 1300.\n        # Counting by hand, the numbers from 1 to 13 that contain a 1 are 1, 10, 11, 12 and 13, so 1 appears 6 times,\n        # but the larger range is hopeless by hand. The ACMer hopes you can help him and generalize the problem,\n        # so that the number of 1s in any non-negative integer range (the count of 1s from 1 to n) can be computed quickly.\n        count = 0\n        for i in range(1, n+1):\n            count += str(i).count('1')\n        return count\n        # better\n\n        # res=0\n        # tmp=n\n        # base=1\n        # while tmp:\n        # last=tmp%10\n        # tmp=tmp/10\n        # res+=tmp*base\n        # if last==1:\n        # res+=n%base+1\n        # elif last>1:\n        # res+=base\n        # base*=10\n        # return res","sub_path":"OFFER/整数中1出现的次数(从1到n整数中1出现的次数)/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"365778254","text":"from __future__ import unicode_literals\n\n\nimport gunicorn.app.base\n\nfrom gunicorn.six import iteritems\n\nimport os\nimport falcon\nfrom zap import configure, zap\n\n# http://docs.gunicorn.org/en/latest/custom.html\nclass StandaloneApplication(gunicorn.app.base.BaseApplication):\n    def __init__(self, app, options=None):\n        self.options = options or {}\n        self.application = app\n        super(StandaloneApplication, self).__init__()\n\n    def load_config(self):\n        config = dict([(key, value) for key, value in iteritems(self.options)\n                       if key in self.cfg.settings and value is not None])\n        for key, value in iteritems(config):\n            self.cfg.set(key.lower(), value)\n\n    def load(self):\n        return self.application\n\n\nif __name__ == '__main__':\n    cfg_file = os.environ.get('REPROCFG') or os.path.join(os.path.dirname(__file__), './conf/zap.yaml')\n    config = configure.LoadYaml().from_yaml_file(cfg_file)\n    endpoints = configure.LoadYaml().getendpointconfig(config)\n    print(endpoints)\n    router = zap.Zap(endpoints)\n\n    StandaloneApplication(router.runner(), configure.LoadYaml().getserviceconfig(config)).run()","sub_path":"api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"457195277","text":"import pickle\n\nfrom rdflib import ConjunctiveGraph\nfrom rdflib import RDFS\nfrom rdflib import URIRef\n\nfrom efo_graph import DiseaseData\nfrom efo_graph.DiseaseNode import DiseaseNode\nfrom util import misc\nfrom util import rdflib_utils\n\n__author__ = 'wnewell'\n\nsub_class_of_predicate = RDFS.subClassOf\nsuper_class_of_disease = URIRef('super_class_of_disease')\n\n\ndef main():\n    print(\"main()\")\n    test()\n\n\ndef test():\n    print(\"test()\")\n    # tree_triples = DiseaseData.make_single_inheritance_triples()\n    # tree_triples = DiseaseData.make_multiple_inheritance_triples()\n    g = make_rdf_graph()\n    disease_graph = DiseaseGraph(g)\n    misc.getchar()\n    # print(\"disease_graph: {}\".format(disease_graph))\n    misc.getchar()\n    # print(\"repr(disease_graph): 
{}\".format(repr(disease_graph)))\n disease_graph.traverse_graph()\n\n\ndef make_rdf_graph():\n # g = make_single_inheritance_graph()\n # g = make_multiple_inheritance_graph()\n g = make_efo_graph()\n return g\n\n\ndef make_single_inheritance_graph():\n test_triples = DiseaseData.make_single_inheritance_triples()\n g = make_graph(test_triples)\n return g\n\n\ndef make_multiple_inheritance_graph():\n test_triples = DiseaseData.make_multiple_inheritance_triples()\n g = make_graph(test_triples)\n return g\n\n\ndef make_graph(triples):\n g = ConjunctiveGraph()\n for triple in triples:\n g.add(triple)\n return g\n\n\ndef make_efo_graph():\n pickle_file = open('pickled_graph_2', 'rb')\n g = pickle.load(pickle_file)\n return g\n\n\n# def make_rdf_graph(test_triples):\n# print(\"\\nmake_graph from test_triples, len: {}\".format(len(test_triples)))\n# g = ConjunctiveGraph()\n# for triple in test_triples:\n# g.add(triple)\n# print(\"g: {}, class: {}, len: {}\".format(g, g.__class__, len(g)))\n# return g\n\n\nclass DiseaseGraph:\n def __init__(self, g):\n self.g = g\n self.root = None\n self.node_map = {}\n self.node_cnt = 0\n self.print_rdf_tree_from_root(g)\n self.make_node_graph(g)\n\n def print_rdf_tree_from_root(self, g):\n print(\"\\n** print_tree, len(g): {}\".format(len(g)))\n disease_root_node = self.get_disease_root_node(g)\n print(\"disease_root_node: {}\".format(disease_root_node))\n self.print_rdf_tree(disease_root_node, 0, g)\n\n def get_disease_root_node(self, g):\n obj = super_class_of_disease\n disease_roots = list(g.subjects(RDFS.subClassOf, obj))\n if (disease_roots):\n return disease_roots[0]\n else:\n # EFO graph\n disease_root = rdflib_utils.get_subject_from_label(g, 'disease')\n return disease_root\n\n def print_rdf_tree(self, node, level, g):\n self.node_cnt += 1\n leader_str = \"|...\" * level\n print('{:3d}. {}{}'.format(self.node_cnt, leader_str, node))\n children = sorted(list(g.subjects(sub_class_of_predicate, node)))\n for child in children:\n self.print_rdf_tree(child, level + 1, g)\n\n def make_node_graph(self, g):\n print(\"\\n\\n** make_node_graph()\")\n disease_root_node = self.get_disease_root_node(g)\n print(\"disease_root_node: {}\".format(disease_root_node))\n graph_node_hash = {}\n # self.make_node_graph_from_root(disease_root_node, graph_node_hash, g)= 0\n self.node_cnt = 0\n self.add_node_to_graph(disease_root_node, None, 0, g)\n\n def add_node_to_graph(self, node, parent, level, g):\n self.node_cnt += 1\n leader_str_dash = \"-\" * 4 * level\n leader_str_space = \" \" * 4 * level\n # print(\n # \"\\n{}. {}add_node_to_graph(), node: {}, parent: {}, level: {}\".format(self.node_cnt, leader_str_dash, node,\n # parent, level))\n graph_node = self.get_or_create_graph_node(node, level)\n # print(\"graph_node: {}\".format(graph_node))\n\n # is root node? (parent is None)\n if parent is None:\n self.root = graph_node\n # print(\"made root node, self.root: {}\".format(self.root))\n\n # add parent\n if parent is not None:\n if parent not in graph_node.parents:\n parent_graph_node = self.node_map[parent]\n graph_node.parents.append(parent_graph_node)\n\n # add child\n if parent in self.node_map:\n parent_graph_node = self.node_map[parent]\n if graph_node not in parent_graph_node.children:\n parent_graph_node.children.append(graph_node)\n\n print(\"{}add node to graph. 
{}\".format(leader_str_space, graph_node))\n\n # recurse through children\n children = sorted(list(g.subjects(sub_class_of_predicate, node)))\n # print(\"children: {}, {}\".format(len(children), children))\n for child in children:\n self.add_node_to_graph(child, node, level + 1, g)\n\n def get_or_create_graph_node(self, node, level):\n # print(\"get_or_create_graph_node(), node: {}, node class: {}, len(node_map): {}\".format(node, node.__class__,\n # len(self.node_map)))\n # print(\"node_map: {}\".format(self.node_map))\n if node in self.node_map:\n # print(\"node: {} is in node_map, k: {}, v: {}\".format(node, node, self.node_map[node]))\n return self.node_map[node]\n else:\n leader_str = \" \" * 4 * level\n # print(\"{}get_or_create_graph_node, node: {}\".format(leader_str, node))\n graph_node = DiseaseNode(node)\n self.node_map[node] = graph_node\n # print(\"get_or_create_graph_node() after insertion into map, node: {}, len(node_map): {}\".format(node, len(self.node_map)))\n return graph_node\n\n def traverse_graph(self):\n print(\"\\n** traverse_graph()\")\n self.print_node_graph(self.root, 0)\n\n def print_node_graph(self, disease_node, level):\n leader_str = \" \" * 4 * level\n print(\"\\n** print_node_graph(), node: {}, class: {}\".format(disease_node, disease_node.__class__))\n # print(\"{}{}\".format(leader_str, node))\n children = disease_node.children\n print(\"children: {}\".format(children))\n paths = \"paths\"\n # paths = self.get_paths_to_root(disease_node)\n paths = self.get_paths_to_root_2(disease_node)\n disease_node.paths = paths\n self.make_efo_instance_from_disease_node(disease_node)\n misc.getchar()\n # print(\"{}{}, children: {}, paths: [[{}]]\".format(leader_str, disease_node, children, paths))\n for child in children:\n print(\"\\n\\nchild: {}, class{}\".format(child, child.__class__))\n self.print_node_graph(child, level + 1)\n\n def get_paths_to_root(self, node):\n print(\"** get_paths_to_root from node: {}, class: {}\".format(node, node.__class__))\n parents = node.parents\n print(\"parents: {}, class: {}\".format(parents, parents.__class__))\n # if parents[0] is not None:\n if parents:\n # if parents[0].name is \"d1\":\n # print(\"have reached root node, return\")\n # return\n for parent in parents:\n print(\"parent: {}\".format(parent))\n self.get_paths_to_root(parent)\n # print(\"have reached root\\n\")\n\n def get_paths_to_root_2(self, disease_node):\n print(\"** get_paths_to_root_2(), disease_node: {}\".format(disease_node))\n print(\"get paths between node: {} and root: {}\".format(disease_node, self.root))\n paths = self.all_paths(disease_node, self.root)\n return paths\n\n # http://introcs.cs.princeton.edu/java/45graph/AllPaths.java.html\n # https: // docs.python.org / 3 / tutorial / datastructures.html\n # path is a stack, stack.append, stack.pop()\n # on_path is a set\n\n\n def all_paths(self, node1, node2):\n print(\"all_paths(), node1: {}, node2: {}\".format(node1, node2))\n paths = []\n path = []\n on_path = set()\n self.enumerate_paths(node1, node2, path, on_path, paths)\n # print(\"found all paths: len(paths): {}, paths: {}\".format(len(paths), paths))\n # paths_set = list(set(paths))\n # make a set of tuples\n paths_set = set()\n for path in paths:\n path_tuple = tuple(path)\n # print(\"path_tuple: {}\".format(path_tuple))\n paths_set.add(path_tuple)\n # paths = list(paths_set).sort()\n # print(\"paths_set: {}, {}\".format(len(paths_set), paths_set))\n paths_list = list(paths_set)\n # print(\"paths_list: {}, {}\".format(len(paths_list), 
paths_list))\n print(\"paths_list:\")\n misc.print_list(paths_list)\n # print(\"paths_set: len(paths_set): {}, paths_set: {}\".format(len(paths_set), paths_set))\n return paths_list\n\n def enumerate_paths(self, node1, node2, path, on_path, paths):\n # print(\"enumerate_paths, node1: {}, node2: {}, paths: {}, on_path: {}\".format(node1, node2, path, on_path))\n # add node node1 to current path from node1\n path.append(node1)\n on_path.add(node1)\n\n # found path from node1 to node2\n if node1 is node2:\n # print(\"found path: {}\".format(path))\n finished_path = list(path)\n paths.append(finished_path)\n\n # get all neighbours that would continue path without repeating a node\n else:\n for parent in node1.parents:\n if parent not in on_path:\n self.enumerate_paths(parent, node2, path, on_path, paths)\n\n # done exploring from node1, so remove from path\n path.pop()\n on_path.remove(node1)\n\n def make_efo_instance_from_disease_node(self, disease_node):\n print(\"make_efo_instance, disease_node: {}, class: {}\".format(disease_node, disease_node.__class__))\n rdf_node = disease_node.rdf_node\n print(\"rdf_node: {}, class: {}\".format(rdf_node, rdf_node.__class__))\n disease_node.rdf_properties = self.parse_properties(rdf_node)\n self.make_paths(disease_node)\n\n def parse_properties(self, rdf_node):\n print(\"parse_properties for rdf_node: {}\".format(rdf_node))\n raw_properties = rdflib_utils.get_subject_properties(self.g, rdf_node)\n rdf_properties = {}\n print(\"raw_properties for rdf_node: {}\".format(rdf_node))\n for index, property in enumerate(raw_properties):\n print(\"{}. {}\".format(index, property))\n property_name = str(property[0])\n property_value = str(property[1])\n # rdf_properties[property_name].append(property_value)\n if property_name in rdf_properties:\n rdf_properties[property_name].append(property_value)\n else:\n rdf_properties[property_name] = [property_value]\n print(\"rdf_properties: {}\".format(rdf_properties))\n for index, key in enumerate(rdf_properties):\n rdf_values = list(map(str, rdf_properties[key]))\n print(\"{}. k: {}, v: {}, {}\".format(index, key, len(rdf_values), rdf_values))\n if 'http://www.ebi.ac.uk/efo/alternative_term' in rdf_properties:\n alternative_terms = rdf_properties['http://www.ebi.ac.uk/efo/alternative_term']\n print(\"alternative_terms: {}, {}\".format(len(alternative_terms), alternative_terms))\n else:\n print(\"no alternative terms\")\n if 'http://www.w3.org/2000/01/rdf-schema#label' in rdf_properties:\n label = rdf_properties['http://www.w3.org/2000/01/rdf-schema#label']\n print(\"label: {}, {}\".format(len(label), label))\n else:\n print(\"no label\")\n return rdf_properties\n\n def make_paths(self, disease_node):\n paths = disease_node.paths\n rdf_node = disease_node.rdf_node\n print(\"make_paths, paths: {}, {}\".format(len(paths), paths))\n # misc.print_list(paths)\n for index, path in enumerate(paths):\n print(\"{}. 
{}\".format(index, path))\n path_strings = list(map(str, path))\n print(\"path_strings: {}\".format(path_strings))\n # path_codes = list(map(split('\\t')[0], path))\n # print(\"path_codes: {}\".format(path_codes))\n self.make_codes_path(path)\n self.make_labels_path(path, rdf_node)\n\n def make_codes_path(self, path):\n print(\"make_codes_path(), path: {}\".format(path))\n path_codes = []\n for index, path_step in enumerate(path):\n code = str(path_step).split('/')[-1]\n path_codes.append(code)\n print(\"{}, path_step:{}, code: {}\".format(index, path_step, code))\n print(\"codes: {}, {}\".format(len(path_codes), path_codes))\n\n def make_labels_path(self, path, rdf_node):\n print(\"make_labels_path(), rdf_node: {}, path: {}, {}\".format(rdf_node, len(path), path))\n path_labels = []\n for index, path_step in enumerate(path):\n # labels = list(self.g.objects(rdf_node, 'http://www.w3.org/2000/01/rdf-schema#label'))\n # labels = list(self.g.objects(path_step, RDFS.label))\n # if labels:\n # label = labels[0]\n # else:\n # label = \"No Label\"\n label = rdflib_utils.get_label(rdf_node, self.g)\n path_labels.append(label)\n print(\"{}, path_step:{}, label: {}\".format(index, path_step, label))\n print(\"labels: {}, {}\".format(len(path_labels), path_labels))\n\n def __repr__(self):\n return \"\"\"\n 1. __repr__\n 2. abc\n 3. xyz\n \"\"\"\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"efo_graph/DiseaseGraph.py","file_name":"DiseaseGraph.py","file_ext":"py","file_size_in_byte":13689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"48654618","text":"from keras.applications.resnet50 import ResNet50\nfrom keras.preprocessing import image\nfrom keras.applications.resnet50 import preprocess_input, decode_predictions\nfrom keras.models import Model\nimport numpy as np\nfrom os import listdir, walk\nfrom os.path import isfile, join\nimport itertools\n\ndef list_all_files_in(path):\n return [join(path, f) for f in listdir(path) if isfile(join(path, f))]\n\ndef predict(img_path, model):\n img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n return model.predict(x)\n\ndef find_difference(f1, f2):\n return np.linalg.norm(f1-f2)\n\ndef find_differences(feature_vectors):\n similar = {}\n keys = [k for k,v in feature_vectors.items()]\n _min = {}\n for k in keys:\n _min[k] = 10000000\n possible_combinations=list(itertools.combinations(keys, 2))\n for k,v in possible_combinations:\n diff=find_difference(feature_vectors[k],feature_vectors[v])\n if(diff < _min[k]):\n _min[k] = diff\n similar[k] = v\n _min[v] = diff\n similar[v] = k\n return similar, diff\n\n\ndef load_image_db():\n image_db = []\n with open('controls.json', 'r') as image_db_f:\n image_db = json.load(image_db_f)\n\n image_db_indexed = {}\n for img in image_db:\n image_db_indexed[img['image']] = img['control']\n return image_db_indexed\n\n\ndef driver():\n image_db = load_image_db()\n feature_vectors = {}\n model = ResNet50(weights='imagenet')\n for img_path in list_all_files_in(\"/home/bot/catkin_ws/data/images\"):\n feature_vectors[img_path.split('/')[-1]] = predict(img_path, model)[0]\n results, diff = find_differences(feature_vectors)\n for k,v in results.items():\n print((k, image_db[k]['control'], v, image_db[v]['control'], diff)) \n #print('Predicted:', decode_predictions(preds, top=3)[0])\n\ndriver()\n\n# Output Result\n\n# images/shoe.jpg is most similar to: images/shoe1.jpg\n# 
images/shoe1.jpg is most similar to: images/shoe.jpg\n# images/bikini.jpg is most similar to: images/dress.jpeg\n# images/dress.jpeg is most similar to: images/bikini.jpg\n# images/bear.jpg is most similar to: images/printer1.jpg\n# images/printer1.jpg is most similar to: images/printer2.jpg\n# images/coil1.jpeg is most similar to: images/printer1.jpg\n# images/printer2.jpg is most similar to: images/printer1.jpg","sub_path":"intelligence/scripts/image_similarity.py","file_name":"image_similarity.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"479282935","text":"\"\"\"\r\n\n\nYou were bored, so you decided to try out a new game you recently downloaded.\nThere are five types of characters, each with their own level of attack power,\ndefense, and speed. There are also five types of armor, weapons, and boots.\nEach type of item has a different cost of gold and a different level of attack\npower, defense or speed.\n\nCreate a function that takes the type of character and the amount of gold. The\nfunction should return the maximum amount of attack power possible, the\nmaximum amount of defense possible, and the maximum speed possible in a list,\nin that order.\n\n### Examples\n\n max_stats(\"Robot\", 160) ➞ [210, 220, 26]\n \n max_stats(\"Fairy\", 50) ➞ [91, 120, 22]\n \n max_stats(\"Warrior\", 70) ➞ [210, 211, 14]\n\n### Notes\n\n * Calculate the attack power, defense, and speed seperately. Do not calculate combinations of items.\n * Check the **Resources** tab for the list of characters and items.\n * **Hint:** Add the character's stats to the items' stats for the result.\n\n\"\"\"\r\n\n# Check the Resources tab for the list of characters and items.\ndef check_stats(name):\n print(\"Challo\")\n print(name)\n if name == \"Knight\":\n stats = [120, 140, 6]\n if name == \"Warrior\":\n stats = [180, 71, 8]\n if name == \"Fairy\":\n stats = [71, 100, 16]\n if name == \"Robot\":\n stats = [160, 120, 11]\n if name ==\"Giant\":\n stats = [160, 200, 4]\n return stats\nstats = []\nweapons = [ [10, 20] , [20, 40] , [30, 60] , [40, 80] , [50,100] ]\narmor = [ [20,30,] , [40,60] , [60,90] , [80, 120] , [100,150] ]\nboots = [ [3,24] , [6,48] , [9,72] , [12,96] , [15,120] ]\ndef max_stats(character, gold):\n return_array = []\n stats = check_stats(character)\n print(character)\n print(stats)\n max_choice = 0;\n for choice in weapons:\n if choice[1] <= gold:\n max_choice = choice[0]\n return_array.append(stats[0]+max_choice)\n max_choice = 0;\n print(return_array)\n for choice in armor:\n if choice[1] <= gold:\n max_choice = choice[0]\n return_array.append(stats[1]+max_choice)\n max_choice = 0;\n print(return_array)\n for choice in boots:\n if choice[1] <= gold:\n max_choice = choice[0]\n return_array.append(stats[2]+max_choice)\n print(return_array)\n return return_array\n\n","sub_path":"68omQmgQEwv8558ZK_18.py","file_name":"68omQmgQEwv8558ZK_18.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"343885578","text":"#!/usr/bin/env python\n# coding: utf-8\nimport numpy as np\nfrom scipy.stats import pearsonr,spearmanr\nimport sys\nfrom os.path import join\nsys.path.insert(1, join(sys.path[0], 'train_model'))\n\nfrom train_model import sent_util\n\nimport torch\nfrom torchtext import data, datasets\nimport pandas as pd\n\n# To train model, first run 'train.py' from train_model dir\n\n# get model path, OS 
safe\n\nsnapshot_dir = 'results_sst/'\nsnapshot_file = join(snapshot_dir, 'best_snapshot_devacc_79.35779571533203_devloss_0.41613781452178955_iter_9000_model.pt')\n\n# get model\nmodel = sent_util.get_model(snapshot_file)\n\n# get data\ninputs, answers, train_iterator, dev_iterator = sent_util.get_sst()\n\nbatch_nums = list(range(6920))\ndata = sent_util.get_batches(batch_nums, train_iterator, dev_iterator)\n\n\n# get list of data with different predicted and true label\nlist_diff_label = list()\nlist_cd = np.zeros(1)\nlist_ig = np.zeros(1)\n\nfor ind in range(6919):\n\tif sent_util.diff_predicted_label(data[ind], model, answers):\n\t\tpred, list_scores_cd = sent_util.CD_unigram(data[ind], model, inputs, answers)\n\t\tlist_cd = np.append(list_cd,list_scores_cd, axis = 0)\n\t\tpred, list_scores_ig = sent_util.integrated_gradients_unigram(data[ind], model, inputs, answers)\n\t\tlist_ig = np.append(list_ig,list_scores_ig, axis = 0)\n\t\tlist_diff_label.append(ind)\n\nprint(\"______________________________________\")\npearson_corr, _ = pearsonr(list_cd,list_ig)\nprint(\"Pearson Correlation\", pearson_corr)\nspearman_corr, _ = spearmanr(list_cd,list_ig)\nprint(\"Spearman Correlation\", spearman_corr)\nprint(\"Covariance\", np.cov(list_cd,list_ig))\nprint()\nprint(\"list of indices of inputs with differing predicted and true labels:\", len(list_diff_label), \"/\", 6920)\nprint()\nfor i in list_diff_label:\n\tprint(i)
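\n\n# (added note) list_cd and list_ig were seeded with np.zeros(1), so the correlations above include one artificial (0, 0) pair; dropping it before the stats section would be a one-liner:\n# list_cd, list_ig = list_cd[1:], list_ig[1:]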
","sub_path":"get_diff_label.py","file_name":"get_diff_label.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"120902437","text":"import pygame, sys, random, time, copy\nfrom pygame.locals import *\nfrom pygame import Color as pyColor\nfrom rotateXY import rotate90\n\nfps = 25 \nscreenwidth,screenheight = 800,600\nboxsize = 25 \nboardwidth,boardheight= 10,20\nblank = '.'\n\nup,down,left,right = 'up','down','left','right'\n\nxmargin = (screenwidth - boardwidth * boxsize) /2\ntopmargin = screenheight - boardheight * boxsize - 5\n\n#               R    G    B\ngray = pyColor('gray')\nblack = pyColor('black')\nc1 = white,red,blue,green,yellow \\\n    = pyColor('white'),pyColor('red'),\\\n    pyColor('blue'),pyColor('green'),pyColor('yellow')\nc2 = skyblue,steelblue,royalblue,cyan \\\n    = pyColor('skyblue'),pyColor('steelblue'),pyColor('royalblue'),pyColor('cyan')\nc3 = purple,orange,pink \\\n    = pyColor('purple'),pyColor('orange'),pyColor('pink')\nc4 = khaki,yellowgreen \\\n    = pyColor('khaki'), pyColor('yellowgreen')\ncolors = [ color for color in c1+c2+c3+c4]\n\nbordercolor1 = 55,55,25 #40,40,50 #blue #20,20,20 #blue\nbordercolor2 = 45,45,25 #blue\ngridcolor1 = 35,35,35\ngridcolor2 = 50,50,50\nbgcolor = black\nscreenbgcolor= 0,15,30 #00221D\n#boardbgcolor = bgcolor #20,20,20 \ntextcolor = white\ntextshadowcolor = gray\n#gridcolor = boardbgcolor[0]+40,boardbgcolor[1]+40, boardbgcolor[2]+ 40\n#gridcolor2 = boardbgcolor[0]+50,boardbgcolor[1]+50, boardbgcolor[2]+50\n\n#------------------ tetris shapes ---------------------\n\ntetrisS = [\n    list( '.....' ),\n    list( '.....' ),\n    list( '..OO.' ),\n    list( '.OO..' ),\n    list( '.....' )\n    ]\n\ntetrisZ = [\n    list( '.....' ),\n    list( '.....' ),\n    list( '.OO..' ),\n    list( '..OO.' ),\n    list( '.....' )\n]\n\ntetrisI = [\n    list( '..O..' ),\n    list( '..O..' ),\n    list( '..O..' ),\n    list( '..O..' ),\n    list( '.....' )\n]\n\ntetrisO = [\n    list( '.....' ),\n    list( '.....' ),\n    list( '.OO..' ),\n    list( '.OO..' ),\n    list( '.....' )\n]\n\ntetrisJ = [\n    list( '.....' ),\n    list( '.O...' ),\n    list( '.OOO.' ),\n    list( '.....' ),\n    list( '.....' )\n]\n\ntetrisL = [\n    list( '.....' ),\n    list( '...O.' ),\n    list( '.OOO.' ),\n    list( '.....' ),\n    list( '.....' )\n]\n\ntetrisT = [\n    list( '.....' ),\n    list( '..O..' ),\n    list( '.OOO.' ),\n    list( '.....' ),\n    list( '.....' )\n]\n\n","sub_path":"pygame/makinggamewithpygame/tetromino/constants4.py","file_name":"constants4.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"536165913","text":"\r\nfrom selenium import webdriver\r\nimport requests, json\r\nimport pandas,time\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\n# Open the chat admin page with the Chrome driver\r\ndriver = webdriver.Chrome('C:/chromedriver')\r\ndriver.implicitly_wait(3)\r\ndriver.get('https://center-pf.kakao.com/_EvRij/chats')\r\ndriver.find_element_by_id('loginEmail').send_keys('skuinc.internship@skuniv.ac.kr') # user id\r\ndriver.find_element_by_id('loginPw').send_keys('!@#$intern12') # password\r\ndriver.find_element_by_xpath('//*[@id=\"login-form\"]/fieldset/button').click()\r\n\r\n# Grab the chat room list\r\nchat_list = driver.find_element_by_xpath('//*[@id=\"mArticle\"]/div[2]/div[3]/div/div').find_elements_by_tag_name('li')\r\nmain_window = driver.window_handles[0] # parent window\r\n\r\n# Visit each chat room in turn\r\nfor chat_room in chat_list:\r\n    chat_room.click()\r\n    driver.switch_to_window(driver.window_handles[1])\r\n    # counselee (customer) name\r\n    user_text = driver.find_element_by_xpath('//*[@id=\"kakaoWrap\"]/div[1]/div[1]/div[1]/div/div/strong').text\r\n    user_text = user_text.split('\\n')\r\n    chat_user = user_text[1]\r\n    # staff member (manager) handling the chat\r\n    chat_manager = driver.find_element_by_class_name('tit_profile').text\r\n    scroll_count = 30\r\n    while scroll_count > 0: # repeat 30 times (the original comment said 50)\r\n        time.sleep(0.1)\r\n        elem = driver.find_element_by_tag_name('body') # the body element is what actually scrolls! - TODO: handle the case where an image popup appears\r\n        elem.click()\r\n        elem.send_keys(Keys.HOME)\r\n        scroll_count-=1\r\n    elem.click()\r\n    elem.send_keys(Keys.HOME)\r\n\r\n
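    # (added note) Sending the HOME key repeatedly makes the chat pane lazy-load\r\n    # older messages; 30 rounds is an assumption about history depth, not a guarantee.\r\n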
    # print('sender \tdate \ttime \tmessage \tattachment url')\r\n    chats_by_date = driver.find_elements_by_xpath('//*[@id=\"room\"]/div/div') # grab the per-date blocks\r\n    for chat_by_date in chats_by_date:\r\n        chat_date = chat_by_date.find_element_by_class_name('emph_date') # pull the date from each block\r\n\r\n        chats = chat_by_date.find_elements_by_class_name('item_chat') # pull the chat bubbles from the block\r\n        print('--------------- date : '+ chat_date.text+'-----------------')\r\n        for chat in chats:\r\n            print('name: ' + chat_user)\r\n            print('manager : ' + chat_manager)\r\n            print('date: ' + chat_date.text)\r\n            try:\r\n                print('time : '+ chat.find_element_by_class_name('txt_time').text)\r\n            except Exception:\r\n                pass\r\n            print('message: ' + chat.find_element_by_class_name('set_chat').text) # print the message text\r\n            try:\r\n                print('attachment : '+ chat.find_element_by_class_name('link_pic').get_attribute('href'))\r\n            except Exception:\r\n                pass\r\n            print('-----------------')\r\n    driver.close()\r\n    driver.switch_to_window(main_window)\r\n\r\n# sent : #room > div > div:nth-child(3) > div:nth-child(2) > div.wrap_cont > strong\r\n# date : #room > div > div:nth-child(2) > div.bg_line > em > span\r\n# time : #room > div > div:nth-child(2) > div.item_chat.item_save > div.wrap_cont > span > span\r\n# message : #room > div > div:nth-child(3) > div:nth-child(4) > div.wrap_cont > div > div > div > p > span\r\n# attachment : #room > div > div:nth-child(3) > div:nth-child(3) > div > div > a.link_pic\r\n","sub_path":"ex6 구글제외.py","file_name":"ex6 구글제외.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"88096103","text":"\"\"\"Particle Swarm Optimization-based algorithms.\n\"\"\"\n\nimport copy\n\nimport numpy as np\n\nimport opytimizer.math.random as r\nimport opytimizer.utils.constant as c\nimport opytimizer.utils.exception as e\nimport opytimizer.utils.logging as l\nfrom opytimizer.core import Optimizer\n\nlogger = l.get_logger(__name__)\n\n\nclass PSO(Optimizer):\n    \"\"\"A PSO class, inherited from Optimizer.\n\n    This is the designed class to define PSO-related\n    variables and methods.\n\n    References:\n        J. Kennedy, R. C. Eberhart and Y. Shi. 
Swarm intelligence.\n Artificial Intelligence (2001).\n\n \"\"\"\n\n def __init__(self, params=None):\n \"\"\"Initialization method.\n\n Args:\n params (dict): Contains key-value parameters to the meta-heuristics.\n\n \"\"\"\n\n logger.info('Overriding class: Optimizer -> PSO.')\n\n # Overrides its parent class with the receiving params\n super(PSO, self).__init__()\n\n # Inertia weight\n self.w = 0.7\n\n # Cognitive constant\n self.c1 = 1.7\n\n # Social constant\n self.c2 = 1.7\n\n # Builds the class\n self.build(params)\n\n logger.info('Class overrided.')\n\n @property\n def w(self):\n \"\"\"float: Inertia weight.\n\n \"\"\"\n\n return self._w\n\n @w.setter\n def w(self, w):\n if not isinstance(w, (float, int)):\n raise e.TypeError('`w` should be a float or integer')\n if w < 0:\n raise e.ValueError('`w` should be >= 0')\n\n self._w = w\n\n @property\n def c1(self):\n \"\"\"float: Cognitive constant.\n\n \"\"\"\n\n return self._c1\n\n @c1.setter\n def c1(self, c1):\n if not isinstance(c1, (float, int)):\n raise e.TypeError('`c1` should be a float or integer')\n if c1 < 0:\n raise e.ValueError('`c1` should be >= 0')\n\n self._c1 = c1\n\n @property\n def c2(self):\n \"\"\"float: Social constant.\n\n \"\"\"\n\n return self._c2\n\n @c2.setter\n def c2(self, c2):\n if not isinstance(c2, (float, int)):\n raise e.TypeError('`c2` should be a float or integer')\n if c2 < 0:\n raise e.ValueError('`c2` should be >= 0')\n\n self._c2 = c2\n\n @property\n def local_position(self):\n \"\"\"np.array: Array of velocities.\n\n \"\"\"\n\n return self._local_position\n\n @local_position.setter\n def local_position(self, local_position):\n if not isinstance(local_position, np.ndarray):\n raise e.TypeError('`local_position` should be a numpy array')\n\n self._local_position = local_position\n\n @property\n def velocity(self):\n \"\"\"np.array: Array of velocities.\n\n \"\"\"\n\n return self._velocity\n\n @velocity.setter\n def velocity(self, velocity):\n if not isinstance(velocity, np.ndarray):\n raise e.TypeError('`velocity` should be a numpy array')\n\n self._velocity = velocity\n\n def compile(self, space):\n \"\"\"Compiles additional information that is used by this optimizer.\n\n Args:\n space (Space): A Space object containing meta-information.\n\n \"\"\"\n\n # Arrays of local positions and velocities\n self.local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))\n self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))\n\n def evaluate(self, space, function):\n \"\"\"Evaluates the search space according to the objective function.\n\n Args:\n space (Space): A Space object that will be evaluated.\n function (Function): A Function object that will be used as the objective function.\n\n \"\"\"\n\n # Iterates through all agents\n for i, agent in enumerate(space.agents):\n # Calculates the fitness value of current agent\n fit = function(agent.position)\n\n # If fitness is better than agent's best fit\n if fit < agent.fit:\n # Updates its current fitness to the newer one\n agent.fit = fit\n\n # Also updates the local best position to current's agent position\n self.local_position[i] = copy.deepcopy(agent.position)\n\n # If agent's fitness is better than global fitness\n if agent.fit < space.best_agent.fit:\n # Makes a deep copy of agent's local best position and fitness to the best agent\n space.best_agent.position = copy.deepcopy(self.local_position[i])\n space.best_agent.fit = copy.deepcopy(agent.fit)\n\n def update(self, space):\n \"\"\"Wraps Particle 
Swarm Optimization over all agents and variables.\n\n Args:\n space (Space): Space containing agents and update-related information.\n\n \"\"\"\n\n # Iterates through all agents\n for i, agent in enumerate(space.agents):\n # Generates random numbers\n r1 = r.generate_uniform_random_number()\n r2 = r.generate_uniform_random_number()\n\n # Updates agent's velocity (p. 294)\n self.velocity[i] = self.w * self.velocity[i] + \\\n self.c1 * r1 * (self.local_position[i] - agent.position) + \\\n self.c2 * r2 * (space.best_agent.position - agent.position)\n\n # Updates agent's position (p. 294)\n agent.position += self.velocity[i]\n\n\nclass AIWPSO(PSO):\n \"\"\"An AIWPSO class, inherited from PSO.\n\n This is the designed class to define AIWPSO-related\n variables and methods.\n\n References:\n A. Nickabadi, M. M. Ebadzadeh and R. Safabakhsh.\n A novel particle swarm optimization algorithm with adaptive inertia weight.\n Applied Soft Computing (2011).\n\n \"\"\"\n\n def __init__(self, params=None):\n \"\"\"Initialization method.\n\n Args:\n params (dict): Contains key-value parameters to the meta-heuristics.\n\n \"\"\"\n\n logger.info('Overriding class: PSO -> AIWPSO.')\n\n # Minimum inertia weight\n self.w_min = 0.1\n\n # Maximum inertia weight\n self.w_max = 0.9\n\n # Overrides its parent class with the receiving params\n super(AIWPSO, self).__init__(params)\n\n logger.info('Class overrided.')\n\n @property\n def w_min(self):\n \"\"\"float: Minimum inertia weight.\n\n \"\"\"\n\n return self._w_min\n\n @w_min.setter\n def w_min(self, w_min):\n if not isinstance(w_min, (float, int)):\n raise e.TypeError('`w_min` should be a float or integer')\n if w_min < 0:\n raise e.ValueError('`w_min` should be >= 0')\n\n self._w_min = w_min\n\n @property\n def w_max(self):\n \"\"\"float: Maximum inertia weight.\n\n \"\"\"\n\n return self._w_max\n\n @w_max.setter\n def w_max(self, w_max):\n if not isinstance(w_max, (float, int)):\n raise e.TypeError('`w_max` should be a float or integer')\n if w_max < 0:\n raise e.ValueError('`w_max` should be >= 0')\n if w_max < self.w_min:\n raise e.ValueError('`w_max` should be >= `w_min`')\n\n self._w_max = w_max\n\n @property\n def fitness(self):\n \"\"\"list: List of fitnesses.\n\n \"\"\"\n\n return self._fitness\n\n @fitness.setter\n def fitness(self, fitness):\n if not isinstance(fitness, list):\n raise e.TypeError('`fitness` should be a list')\n\n self._fitness = fitness\n\n def _compute_success(self, agents):\n \"\"\"Computes the particles' success for updating inertia weight (eq. 
16).\n\n Args:\n agents (list): List of agents.\n\n \"\"\"\n\n # Initial counter\n p = 0\n\n # Iterates through every agent\n for i, agent in enumerate(agents):\n # If current agent fitness is smaller than its best\n if agent.fit < self.fitness[i]:\n # Increments the counter\n p += 1\n\n # Replaces fitness with current agent's fitness\n self.fitness[i] = agent.fit\n\n # Update inertia weight value\n self.w = (self.w_max - self.w_min) * (p / len(agents)) + self.w_min\n\n def update(self, space, iteration):\n \"\"\"Wraps Adaptive Inertia Weight Particle Swarm Optimization over all agents and variables.\n\n Args:\n space (Space): Space containing agents and update-related information.\n iteration (int): Current iteration.\n\n \"\"\"\n\n # Checks if it is the first iteration\n if iteration == 0:\n # Creates a list of initial fitnesses\n self.fitness = [agent.fit for agent in space.agents]\n\n # Iterates through all agents\n for i, agent in enumerate(space.agents):\n # Generates random numbers\n r1 = r.generate_uniform_random_number()\n r2 = r.generate_uniform_random_number()\n\n # Updates agent's velocity\n self.velocity[i] = self.w * self.velocity[i] + \\\n self.c1 * r1 * (self.local_position[i] - agent.position) + \\\n self.c2 * r2 * (space.best_agent.position - agent.position)\n\n # Updates agent's position\n agent.position += self.velocity[i]\n\n # Computing particle's success and updating inertia weight\n self._compute_success(space.agents)\n\n\nclass RPSO(PSO):\n \"\"\"An RPSO class, inherited from Optimizer.\n\n This is the designed class to define RPSO-related\n variables and methods.\n\n References:\n M. Roder, G. H. de Rosa, L. A. Passos, A. L. D. Rossi and J. P. Papa.\n Harnessing Particle Swarm Optimization Through Relativistic Velocity.\n IEEE Congress on Evolutionary Computation (2020).\n\n \"\"\"\n\n def __init__(self, params=None):\n \"\"\"Initialization method.\n\n Args:\n params (dict): Contains key-value parameters to the meta-heuristics.\n\n \"\"\"\n\n logger.info('Overriding class: PSO -> RPSO.')\n\n # Overrides its parent class with the receiving params\n super(RPSO, self).__init__(params)\n\n logger.info('Class overrided.')\n\n @property\n def mass(self):\n \"\"\"np.array: Array of masses.\n\n \"\"\"\n\n return self._mass\n\n @mass.setter\n def mass(self, mass):\n if not isinstance(mass, np.ndarray):\n raise e.TypeError('`mass` should be a numpy array')\n\n self._mass = mass\n\n def compile(self, space):\n \"\"\"Compiles additional information that is used by this optimizer.\n\n Args:\n space (Space): A Space object containing meta-information.\n\n \"\"\"\n\n # Arrays of local positions, velocities and masses\n self.local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))\n self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))\n self.mass = r.generate_uniform_random_number(size=(space.n_agents, space.n_variables, space.n_dimensions))\n\n def update(self, space):\n \"\"\"Wraps Relativistic Particle Swarm Optimization over all agents and variables.\n\n Args:\n space (Space): Space containing agents and update-related information.\n\n \"\"\"\n\n # Calculates the maximum velocity\n max_velocity = np.max(self.velocity)\n\n # Iterates through all agents\n for i, agent in enumerate(space.agents):\n # Generates rnadom number\n r1 = r.generate_uniform_random_number()\n r2 = r.generate_uniform_random_number()\n\n # Updates current agent velocity (eq. 
11)\n gamma = 1 / np.sqrt(1 - (max_velocity ** 2 / c.LIGHT_SPEED ** 2))\n self.velocity[i] = self.mass[i] * self.velocity[i] * gamma + \\\n self.c1 * r1 * (self.local_position[i] - agent.position) + \\\n self.c2 * r2 * (space.best_agent.position - agent.position)\n\n # Updates current agent position\n agent.position += self.velocity[i]\n\n\nclass SAVPSO(PSO):\n \"\"\"An SAVPSO class, inherited from Optimizer.\n\n This is the designed class to define SAVPSO-related\n variables and methods.\n\n References:\n H. Lu and W. Chen.\n Self-adaptive velocity particle swarm optimization for solving constrained optimization problems.\n Journal of global optimization (2008).\n\n \"\"\"\n\n def __init__(self, params=None):\n \"\"\"Initialization method.\n\n Args:\n params (dict): Contains key-value parameters to the meta-heuristics.\n\n \"\"\"\n\n logger.info('Overriding class: PSO -> SAVPSO.')\n\n # Overrides its parent class with the receiving params\n super(SAVPSO, self).__init__(params)\n\n logger.info('Class overrided.')\n\n def update(self, space):\n \"\"\"Wraps Self-adaptive Velocity Particle Swarm Optimization over all agents and variables.\n\n Args:\n space (Space): Space containing agents and update-related information.\n\n \"\"\"\n\n # Creates an array of positions\n positions = np.zeros((space.agents[0].position.shape[0], space.agents[0].position.shape[1]))\n\n # For every agent\n for agent in space.agents:\n # Sums up its position\n positions += agent.position\n\n # Divides by the number of agents\n positions /= len(space.agents)\n\n # Iterates through all agents\n for i, agent in enumerate(space.agents):\n # Generates a random index for selecting an agent\n idx = r.generate_integer_random_number(0, len(space.agents))\n\n # Updates current agent's velocity (eq. 8)\n r1 = r.generate_uniform_random_number()\n self.velocity[i] = self.w * np.fabs(self.local_position[idx] - self.local_position[i]) * \\\n np.sign(self.velocity[i]) + r1 * (self.local_position[i] - agent.position) + \\\n (1 - r1) * (space.best_agent.position - agent.position)\n\n # Updates current agent's position\n agent.position += self.velocity[i]\n\n # For every decision variable\n for j in range(agent.n_variables):\n # Generates a random number\n r4 = r.generate_uniform_random_number(0, 1)\n\n # If position is greater than upper bound\n if agent.position[j] > agent.ub[j]:\n # Replaces its value\n agent.position[j] = positions[j] + 1 * r4 * (agent.ub[j] - positions[j])\n\n # If position is smaller than lower bound\n if agent.position[j] < agent.lb[j]:\n # Replaces its value\n agent.position[j] = positions[j] + 1 * r4 * (agent.lb[j] - positions[j])\n\n\nclass VPSO(PSO):\n \"\"\"A VPSO class, inherited from Optimizer.\n\n This is the designed class to define VPSO-related\n variables and methods.\n\n References:\n W.-P. Yang. 
Vertical particle swarm optimization algorithm and its application in soft-sensor modeling.\n International Conference on Machine Learning and Cybernetics (2007).\n\n \"\"\"\n\n def __init__(self, params=None):\n \"\"\"Initialization method.\n\n Args:\n params (dict): Contains key-value parameters to the meta-heuristics.\n\n \"\"\"\n\n logger.info('Overriding class: PSO -> VPSO.')\n\n # Overrides its parent class with the receiving params\n super(VPSO, self).__init__(params)\n\n logger.info('Class overrided.')\n\n @property\n def v_velocity(self):\n \"\"\"np.array: Array of vertical velocities.\n\n \"\"\"\n\n return self._v_velocity\n\n @v_velocity.setter\n def v_velocity(self, v_velocity):\n if not isinstance(v_velocity, np.ndarray):\n raise e.TypeError('`v_velocity` should be a numpy array')\n\n self._v_velocity = v_velocity\n\n def compile(self, space):\n \"\"\"Compiles additional information that is used by this optimizer.\n\n Args:\n space (Space): A Space object containing meta-information.\n\n \"\"\"\n\n # Arrays of local positions, velocities and vertical velocities\n self.local_position = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))\n self.velocity = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))\n self.v_velocity = np.ones((space.n_agents, space.n_variables, space.n_dimensions))\n\n def update(self, space):\n \"\"\"Wraps Vertical Particle Swarm Optimization over all agents and variables.\n\n Args:\n space (Space): Space containing agents and update-related information.\n\n \"\"\"\n\n # Iterates through all agents\n for i, agent in enumerate(space.agents):\n # Generates uniform random numbers\n r1 = r.generate_uniform_random_number()\n r2 = r.generate_uniform_random_number()\n\n # Updates current agent velocity (eq. 3)\n self.velocity[i] = self.w * self.velocity[i] + self.c1 * r1 * (self.local_position[i] - agent.position) + \\\n self.c2 * r2 * (space.best_agent.position - agent.position)\n\n # Updates current agent vertical velocity (eq. 4)\n self.v_velocity[i] -= (np.dot(self.velocity[i].T, self.v_velocity[i]) /\n (np.dot(self.velocity[i].T, self.velocity[i]) + c.EPSILON)) * self.velocity[i]\n\n # Updates current agent position (eq. 
5)\n            r1 = r.generate_uniform_random_number()\n            agent.position += r1 * self.velocity[i] + (1 - r1) * self.v_velocity[i]\n","sub_path":"opytimizer/optimizers/swarm/pso.py","file_name":"pso.py","file_ext":"py","file_size_in_byte":17635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"377769798","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('artist', '0003_auto_20150101_1042'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='artist',\n            name='_comment',\n            field=models.TextField(null=True, blank=True),\n            preserve_default=True,\n        ),\n        migrations.AddField(\n            model_name='artist',\n            name='_data_source',\n            field=models.CharField(null=True, blank=True, max_length=255),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"artist/migrations/0004_auto_20150101_1123.py","file_name":"0004_auto_20150101_1123.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"487391515","text":"import ctypes\n\nclass DynamicArray(object):\n\n    def __init__(self, *args, **kwargs):\n        super(DynamicArray, self).__init__(*args, **kwargs)\n        self.current_elements = 0\n        self.current_array_size = 1\n        self.Array = self._create_array(self.current_array_size)\n\n\n    def append(self, element):\n\n        if self.current_elements == self.current_array_size:\n            self._resize(2 * self.current_elements)\n\n        self.Array[self.current_elements] = element\n        self.current_elements += 1\n\n\n    def _resize(self, new_size):\n        NEW_ARRAY = self._create_array(new_size)\n\n        for index in range(self.current_elements):\n            NEW_ARRAY[index] = self.Array[index]\n\n        self.Array = NEW_ARRAY\n        self.current_array_size = new_size\n\n    @staticmethod\n    def _create_array(new_size):\n        print(\"New array of size {} created\".format(new_size))\n        return (ctypes.py_object * new_size)()\n\n\ndynamic_array = DynamicArray()\ndynamic_array.append(1)\ndynamic_array.append(2)\ndynamic_array.append(3)\ndynamic_array.append(4)\n\nfor element in dynamic_array.Array:\n    print(element)","sub_path":"ds/array/dynamic_array.py","file_name":"dynamic_array.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"597488870","text":"from Indicadores import Indicadores\nclass Nomina(Indicadores):\n    def __init__(self):\n        self.__salarioBasico = 0\n        self.__diasLiquidados = 0\n        self.__smlv = self.salariominimo()\n        self.__auxilioT = 106454\n\n    def setsalarioBasico(self, salarioBasico):\n        if isinstance(salarioBasico, (int, float)):\n            print(salarioBasico)\n            print(self.salariominimo())\n            if salarioBasico >= self.salariominimo():\n                self.__salarioBasico = salarioBasico\n            else:\n                print(\"The base salary cannot be below the legal minimum wage\")\n        else:\n            print(\"error\")\n\n    def getsalarioBasico(self):\n        return self.__salarioBasico\n    def setDiasLiquidados(self, diasliquidados):\n        self.__diasLiquidados = diasliquidados\n    def salarioDevengado(self):\n        try:\n            return(self.__salarioBasico / 30)* self.__diasLiquidados\n        except:\n            print(\"Error while computing the earned salary\")\n    def getDiasLiquidados(self):\n        return self.__diasLiquidados\n    def auxilioTransporte(self):\n        if self.__salarioBasico > (self.__smlv *2):\n            return 0\n        else:\n            return self.__auxilioT / 30 * self.__diasLiquidados\n    def totalDevengado(self):\n        return self.salarioDevengado() + self.auxilioTransporte()\n    def __str__(self):\n        return str(\"Base salary: {} \\n\"\n                   \"Days settled: {} \\n\"\n                   \"Earned salary: {} \\n\"\n                   \"Transport allowance: {} \\n\"\n                   \"Total earned: {} \\n\").format(\n            self.__salarioBasico,\n            self.__diasLiquidados,\n            self.salarioDevengado(),\n            self.auxilioTransporte(),\n            self.totalDevengado())\n
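\n# (added) Minimal usage sketch, kept as comments because Indicadores lives in a\n# sibling module and salariominimo() enforces the floor; the amount is illustrative.\n# n = Nomina()\n# n.setsalarioBasico(1500000)\n# n.setDiasLiquidados(15)\n# print(n)  # half a month of salary plus the transport allowance\n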
","sub_path":"Clase 4/POO/nomina.py","file_name":"nomina.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"521319873","text":"import requests\nimport os\n\nPROTOCOL_PREFIX = 'https://'\n\n\ndef download(num=None):\n    con_id = num\n    if num is None:\n        con_id = input(\"DCCON NUM :\")\n        if con_id == '':\n            exit()\n\n    loading_chars = ['|', '/', '-', '\\\\']\n    package_url = f'{PROTOCOL_PREFIX}dccon.dcinside.com/index/package_detail'\n    download_url = f'{PROTOCOL_PREFIX}dcimg5.dcinside.com/dccon.php?no='\n    # Open session\n    s = requests.Session()\n    # Get Cookie (ci_c)\n    r = s.get(package_url,\n              headers={'X-Requested-With': 'XMLHttpRequest'})\n    # Get Json (ci_c to ci_t)\n    req = s.post(package_url,\n                 headers={'X-Requested-With': 'XMLHttpRequest'},\n                 data={'ci_t': r.cookies['ci_c'], 'package_idx': con_id})\n\n    json_data = req.json()\n\n    default_dir = os.path.dirname(os.path.abspath(__file__))\n    download_path = os.path.join(default_dir, json_data['info']['title'])\n\n    try:\n        os.makedirs(download_path)\n    except Exception as e:\n        print(e)\n    else:\n        loading_cnt = 0\n\n        for item in json_data['detail']:\n            print(f'\\rLoading... ({loading_chars[loading_cnt]})', end='')\n            loading_cnt = (loading_cnt + 1) % 4\n            filename = item['idx'] + '.' + item['ext']\n            image = s.get(download_url + item['path'],\n                          headers={'Referer': f'{PROTOCOL_PREFIX}dccon.dcinside.com/'})\n            with open(os.path.join(download_path, filename), 'wb') as fd:\n                for chunk in image.iter_content(chunk_size=128):\n                    fd.write(chunk)\n    s.close()\n\n\nif __name__ == \"__main__\":\n    download()\n","sub_path":"python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"352368619","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport sys\n\n\nfor i in range(1, len(sys.argv)):\n\tfilename = sys.argv[i]\n\n\tdata = open(filename, 'r')\n\tx = []\n\ty = []\n\tfor line in data:\n\t\tx.append(float(line.split()[0]))\n\t\ty.append(float(line.split()[1]))\n\n\tplt.plot(x, y, linewidth=1, label=filename[:filename.find('.')])\n'''\n\tz1 = np.polyfit(x, y, 5)\n\tp1 = np.poly1d(z1)\n\tyvals=p1(x)\n\tplt.plot(x, yvals, linewidth=1, label=filename[:filename.find('.')])\n'''
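\n# (added note) The triple-quoted block above is a disabled degree-5 polynomial\n# smoother (np.polyfit / np.poly1d); enabling it would replace the raw plt.plot\n# call inside the loop with the fitted curve.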
\nplt.ylim(0, 1)\nplt.legend()\nplt.xlabel('Position (bin)')\nplt.ylabel('Normalized read density')\n#ax1.fill_between(x, y, color='tomato', alpha=0.5)\nplt.show()\n#plt.savefig(f'{filename}.png',dpi=100)\n\n","sub_path":"avg_read_depth/average_depth.py","file_name":"average_depth.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"179492836","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 10 04:57:37 2019\n\n@author: hiago\n\"\"\"\nimport os\n\n\ndef retrieve_from_list_file(path):\n    \"\"\"Read the labels stored in a list file.\n\n    Args:\n        path: A string with the path to the labels file.\n\n    Returns:\n        A list with the labels (folder names) on success,\n        False otherwise.\n\n    \"\"\"\n    labels_txt = open(path, 'r')\n    _labels = labels_txt.readlines()\n\n    labels = []\n    for idx, item in enumerate(_labels):\n        labels.append(_labels[idx].replace('\\n', ''))\n\n    labels_txt.close()\n\n    return labels\n\n\ndef get_labels(path):\n    \"\"\"Collect the labels (folder names) under a directory, caching them in labels.txt.\n\n    Args:\n        path: A string with the path to the directory.\n\n    Returns:\n        A list with the labels (folder names) on success,\n        False otherwise.\n\n    \"\"\"\n    if os.path.isfile(path + 'labels.txt'):\n        return retrieve_from_list_file(path + 'labels.txt')\n    else:\n        try:\n            _dirs = os.listdir(path)\n            if not _dirs:\n                raise ValueError(\"The directory is empty\")\n        except ValueError as ve:\n            print(ve)\n            return False\n\n        dirs = list(filter(lambda x: os.path.isdir(path + x), _dirs))\n        labels = open(path + 'labels.txt', 'w')\n\n        for label in dirs:\n            labels.write(label + \"\\n\")\n        labels.close()\n\n        return dirs\n\n\ndef write_folder(folderName):\n    folder = open('data/datasets/folder.txt', 'w')\n\n    folder.write(folderName + \"\\n\")\n    folder.close()\n","sub_path":"Segregator/data/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"211331494","text":"'''\n\t@author Leo Xuzhang Lin, Calvin Mei\n\n\t-todo: Make test cases\n'''\nimport pickle\nimport flask\nimport os\nimport db\nimport util\nimport datetime\nimport base64\nimport hashlib\n\n# Flask import\nfrom flask import Flask, redirect, request, jsonify, render_template, make_response, url_for, send_from_directory\nfrom werkzeug import secure_filename\nfrom bson.binary import Binary\n\n# Crypto\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\nfrom Crypto.PublicKey import RSA\n\n# Database import\nfrom dropboxapi import testapi\n\n# fidi import\nfrom util import *\nfrom db import *\n\n\n# Initializes the app and set the debug mode\napp = Flask(__name__)\n\n# Initializes the token manager\ntoken_manager = {'count':0}\n\n# Upload configuration\nUPLOAD_FOLDER = \"files/\";\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER;\n\n# Dropbox API setup\ntestapi.setup()\n \n# Enable the user to login\n@app.route('/login')\ndef index():\n\t\t# When no token, give splash\n\t\tif request.cookies.get('oatmeal') in [\"\", None]:\n\t\t\treturn render_template('splash.html');\n\t\telse:\n\t\t# When has token, try to give logged in page\n\t\t\treturn redirect('/')\n\n# API call for uploading the key\n@app.route('/key', methods=['GET','POST'])\ndef key():\n\t\t# Only accepts post request\n\t\tif request.method == 'POST':\n\t\t\t\ttoken=request.cookies.get('oatmeal')\n\t\t\t\tprivate=request.form['keytext']\n\t\t\t\t\n\t\t\t\ttoken_manager[token][\"private\"]=private\n\t\t\t\tuid = token_manager[token]['objectid']\n\t\t\t\tuser = find_user_by_objectid(uid)\n\n\t\t\t\taes = user['key']['aes'].encode('ascii', 'ignore')\n\n\t\t\t\tprivate_key=RSA.importKey(private)\n\n\t\t\t\taes = pickle.loads(aes)\n\t\t\t\taes = rsa_decrypt(private_key, aes)\n\n\n\t\t\t\ttoken_manager[token]['aes']=aes\n\n\t\t\t\treturn redirect('/')\n\t\telse:\n\t\t\treturn redirect('/')\n\n@app.route('/logout', methods=['POST'])\ndef logout():\n\t\tif request.method == 'POST':\n\t\t\t\n\t\t\t# Make token invalid\n\t\t\ttoken_manager[request.cookies.get('oatmeal')]=None\n\t\t\t\n\t\t\t# Reroute to / for consistency\n\t\t\treturn 
redirect('/')\n\n@app.route('/', methods=['GET', 'POST'])\ndef main():\n\n\t\tif request.method == 'POST':\n\n\t\t\t\t# Incoming request from forms\n\t\t\t\tuid = request.form['username']\n\t\t\t\tupw = request.form['password']\n\t\t\t\t\n\t\t\t\t# Retrieved user object from DB\n\t\t\t\tuser = find_user_by_id(uid)\n\n\t\t\t\t# Token generation to hash table\n\t\t\t\tif user and user['pw']==upw: # Simple retarded pw check\n\t\t\t\t\ttoken = tokengen()\n\t\t\t\t\t# Retrieve user from database\n\t\t\t\t\tuname = user['name']\n\t\t\t\t\t\n\t\t\t\t\t# Get user specific path\n\t\t\t\t\tpath = pathgen(str(user['_id']))\n\n\n\t\t\t\t\t# Create server cache folder structure for specific user\n\t\t\t\t\tif not os.path.exists(path):\n\t\t\t\t\t\t\tos.makedirs(path)\n\n\t\t\t\t\t# Retrieve files from file database\n\t\t\t\t\tufiles = file_metadata(path)\n\t\t\t\t\t\n\t\t\t\t\t# Adding token to cached info\n\t\t\t\t\ttoken_manager[token] = {\"objectid\":str(user['_id']), \"name\": uname, \"files\":ufiles, \"private\":None, \"aes\":None}\n\t\t\t\t\t\n\t\t\t\t\t# Creating response\n\t\t\t\t\tresp = render(token) # Checkout render for more info\n\n\t\t\t\t\t# Oatmeal cookie mother fuckers\n\t\t\t\t\tresp.set_cookie('oatmeal', token)\n\n\t\t\t\t\treturn resp\n\t\t\t\t\n\t\t\t\t# Remain at splash if you suck\n\t\t\t\telse:\n\t\t\t\t\treturn render_template('splash.html')\n\n\n\n\t\t''' \n\t\tComplex / Routing\n\t\t'''\n\n\t\ttoken =request.cookies.get('oatmeal')\n\t\t# Phantom cookie: clear it and fall through to the splash page\n\t\tif token not in token_manager:\n\t\t\tresp=make_response(render_template('splash.html'))\n\t\t\tresp.set_cookie('oatmeal', '')\n\n\t\t# Empty token: clear the cookie and fall through to the splash page\n\t\telif not token_manager[token]:\n\t\t\tresp=make_response(render_template('splash.html'))\n\t\t\tresp.set_cookie('oatmeal', '')\n\t\t\n\t\t# Has good cookie, session works (when people comes back to session)\n\t\telse:\n\t\t\ttoken_manager[token]['files'] = file_metadata(pathgen(token_manager[token]['objectid']))\n\t\t\tresp = render(token)\n\n\t\t#respond\n\t\treturn resp\n\n@app.route('/signup', methods=['POST'])\ndef signup():\n\t\tif request.method == 'POST':\n\t\t\tnm = request.form['name']\n\t\t\tuid = request.form['username']\n\t\t\tupw = request.form['password']\n\n\t\t\tkeys = create_rsa_pair()\n\t\t\t\n\t\t\tprivate = keys.exportKey(\"PEM\")\n\t\t\tpublic = keys.publickey().exportKey()\n\n\t\t\taes = create_aes_key()\n\n\t\t\taes = rsa_encrypt(keys.publickey(), aes) #[0]\n\t\t\taes = pickle.dumps(aes);\n\t\t\tobjectid = add_user(nm, uid, upw, {\"public\":public, \"aes\":aes})\n\n\t\t\treturn render_template('splash.html', id=objectid, private=private)\n\t\telse:\n\t\t\treturn redirect('/')\n
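\n# (added, editor's reading) Signup mints an RSA pair, wraps a fresh AES key with\n# the public half, and stores only the wrapped key; the private PEM is shown to\n# the user once and must be pasted back at /key to unwrap the AES key for a session.\n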
\n# Renders the page with a specific token\ndef render(token):\n\n\t\tuname = token_manager[token][\"name\"]\n\t\tfiles = token_manager[token][\"files\"]\n\t\tprivate = token_manager[token][\"private\"]\n\n\t\tresp = make_response(render_template('main.html', \n\t\t\t\tname=uname, \n\t\t\t\tfilesArray=files,\n\t\t\t\tprivate=private\n\t\t\t\t))\n\t\treturn resp\n\n\n@app.route('/upload', methods=['POST', 'GET'])\ndef upload():\n\t\t\n\t\tif request.method == 'POST':\n\t\t\tfile = request.files['file']\n\t\t\ttoken = request.cookies.get('oatmeal')\n\n\t\t\tif file:\t\n\t\t\t\tfilename = secure_filename(file.filename)\n\t\t\t\tpath = os.path.join(token_manager[token].get('objectid'), app.config['UPLOAD_FOLDER'])\n\n\t\t\t\tif not os.path.exists(path):\n\t\t\t\t\tos.makedirs(path)\n\n\n\t\t\t\t# Add the file name into the path\n\t\t\t\tfpath = os.path.join(path, filename)\n\t\t\t\tfile.save(fpath) # Save the file locally to fpath\n\n\t\t\t\tdate = dategen()\n\n\t\t\t\tufiles = file_metadata(path)\n\t\t\t\ttoken_manager[token]['files'] = ufiles\n\t\t\t\t\n\t\t\t\tkey = token_manager[token][\"aes\"]\n\t\t\t\tfile_as_string = open(fpath, 'r').read()\n\t\t\t\tencrypted_file = open(fpath+\".enc\", \"w\")\n\t\t\t\tenc_string = aes_encrypt(key, file_as_string)\n\t\t\t\tencrypted_file.write(enc_string)\n\n\t\t\t\ttestapi.upload(fpath+\".enc\")\n\n\t\t\t\tos.remove(fpath)\n\t\t\t\tos.remove(fpath+\".enc\")\n\n\t\t\t\treturn redirect('/')\n\t\t\telse:\n\t\t\t\treturn redirect('/')\n\t\telse:\n\t\t\treturn redirect('/')\n\n@app.route('/delete/<filename>')\ndef delete(filename):\n\t\ttoken = request.cookies.get('oatmeal')\n\t\tpath = os.path.join(token_manager[token].get('objectid'), app.config['UPLOAD_FOLDER'])\n\t\tfpath = os.path.join(path, filename)\n\t\ttestapi.delete(fpath)\n\t\t\n\t\tufiles = file_metadata(path)\n\t\ttoken_manager[token]['files'] = ufiles\n\t\treturn redirect('/')\n\n\n\n@app.route('/files/<filename>')\ndef uploaded_file(filename):\n\t\ttoken = request.cookies.get('oatmeal')\n\t\tpath = os.path.join(token_manager[token].get('objectid'), app.config['UPLOAD_FOLDER'])\n\t\t\n\t\tif not os.path.exists(path):\n\t\t\tos.makedirs(path)\n\t\t\n\t\tfpath = os.path.join(path,filename)\n\t\ttestapi.download(fpath+\".enc\")\n\n\t\tkey = token_manager[token][\"aes\"]\n\t\tfile_as_string = open(fpath+\".enc\", 'r').read()\n\t\tdecrypted_file = open(fpath, \"w\")\n\t\t\n\t\tdec_string = aes_decrypt(key, file_as_string)\n\t\t\n\t\tdecrypted_file.write(dec_string)\n\t\tdecrypted_file.close()\n\n\t\treturn send_from_directory(os.path.join(token_manager[token].get('objectid'), app.config['UPLOAD_FOLDER']), filename)\n\n\n''' Encryption zone / methods'''\n\n# NOTE: AES.new(key) defaults to ECB with ad-hoc '{' padding below; demo-grade only\ndef aes_encrypt(key, plaintext):\n    BLOCK_SIZE = 16\n    PADDING='{'\n    pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING\n    \n    EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))\n    cipher = AES.new(key) \n\n    return EncodeAES(cipher, plaintext)\n    \ndef aes_decrypt(key, ciphertext):\n    BLOCK_SIZE = 16\n    PADDING='{'\n    DecodeAES = lambda c,e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)\n    \n    cipher = AES.new(key)\n    return DecodeAES(cipher, ciphertext)\n    \ndef create_rsa_pair(bits=2048):\n\t\tkeys = RSA.generate(bits, e=65537)\n\t\treturn keys\n \ndef rsa_encrypt(public_key, plaintext):\n\t\tencrypted = public_key.encrypt(plaintext, \"16\")\n\t\treturn encrypted\n\ndef rsa_decrypt(private_key, ciphertext):\n\t\tdecrypted = private_key.decrypt(ciphertext)\n\t\treturn decrypted\n\n''' Creates 32 byte (256-bit) random AES key'''\ndef create_aes_key():\n\t\treturn Random.new().read(16)\n\t\t#return os.urandom(32)\n\n# Generate the file path (refactored)\ndef pathgen(uid):\n\t\treturn os.path.join(uid, app.config['UPLOAD_FOLDER'])\n\n# Generate the file path with filename (refactored)\ndef filepathgen(uid, filename):\n\t\treturn os.path.join(uid, app.config['UPLOAD_FOLDER'], filename)\n\ndef nom(text):\n\t\treturn text.decode('utf-8', 'ignore').encode('ascii','ignore')\n\ndef file_metadata(p):\n\t\tufilespath = testapi.get_meta_data(p)\n\t\tufiles = []\n\t\tcounter=0;\n\t\tfor f in ufilespath:\n\t\t\tcounter+=1\n\t\t\tfname=str(f).split(\"/\")[3]\n\t\t\tufiles.append({\"name\":fname, \"date\":dategen(), \"number\":counter})\n\t\treturn ufiles\n\n\t\n\t\t'''\n\t\tfile = open(\"message.txt\", 'r')\n\t\tfile_as_string = 
file.read()\n\t\taes = create_aes_key()\n\t\trsa = create_rsa_pair()\n\t\tprint aes\n\t\tct = rsa_encrypt(rsa.publickey(), aes)\n\t\tprint ct\n\t\texport = rsa.exportKey(\"PEM\")\n\t\taes_key = rsa_decrypt(RSA.importKey(export), ct)\n\t\t\n\t\tct = aes_encrypt(aes_key, file_as_string)\n\t\tprint ct\n\t\tpt = aes_decrypt(aes_key, ct)\n\t\tprint pt\n\t\t'''\n\t\t\nif __name__ == '__main__':\n\t\tapp.run(debug=True)\n","sub_path":"fidistorage.py","file_name":"fidistorage.py","file_ext":"py","file_size_in_byte":8703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"451774432","text":"from time import sleep\n\nimport wx\n\nfrom instrument.sensors.lakeshore import LakeshoreController\nfrom threadtools import run_async\n\nclass TemperaturePanel(wx.Panel):\n \"\"\"Panel to display the readouts of a LakeshoreController.\"\"\"\n\n def __init__(self, parent, controller):\n \"\"\"Initialize a TemperaturePanel.\n\n Arguments:\n parent -- Parent panel.\n controller -- Temperature controller.\n Raises:\n None\n \"\"\"\n super(TemperaturePanel, self).__init__(parent)\n\n self.parent = parent\n self.controller = controller\n \n #Init Sizers\n ################################################################\n sbox = wx.StaticBox(self, -1, 'Temperature')\n boxSizer = wx.StaticBoxSizer(sbox, wx.HORIZONTAL)\n sizer = wx.GridSizer(4,2,4,4)\n \n #Components\n ################################################################\n self.temp_a_label = wx.StaticText(\n self, label = LakeshoreController.temp_probes['a']+': ')\n self.temp_b_label = wx.StaticText(\n self, label = LakeshoreController.temp_probes['b']+': ')\n self.temp_c_label = wx.StaticText(\n self, label = LakeshoreController.temp_probes['c']+': ')\n self.temp_d_label = wx.StaticText(\n self, label = LakeshoreController.temp_probes['d']+': ')\n \n self.temp_a = wx.StaticText(self, label = '0 K')\n self.temp_b = wx.StaticText(self, label = '0 K')\n self.temp_c = wx.StaticText(self, label = '0 K')\n self.temp_d = wx.StaticText(self, label = '0 K')\n\n self.temp_a_label.SetFont(wx.Font(13, wx.DEFAULT, \n wx.NORMAL, wx.BOLD))\n self.temp_b_label.SetFont(wx.Font(13, wx.DEFAULT, \n wx.NORMAL, wx.BOLD))\n self.temp_c_label.SetFont(wx.Font(13, wx.DEFAULT, \n wx.NORMAL, wx.BOLD))\n self.temp_d_label.SetFont(wx.Font(13, wx.DEFAULT, \n wx.NORMAL, wx.BOLD))\n\n self.temp_a.SetFont(wx.Font(13, wx.MODERN, wx.NORMAL, wx.BOLD))\n self.temp_b.SetFont(wx.Font(13, wx.MODERN, wx.NORMAL, wx.BOLD))\n self.temp_c.SetFont(wx.Font(13, wx.MODERN, wx.NORMAL, wx.BOLD))\n self.temp_d.SetFont(wx.Font(13, wx.MODERN, wx.NORMAL, wx.BOLD))\n\n #Layout\n ################################################################\n sizer.AddMany([self.temp_a_label,\n self.temp_a,\n self.temp_b_label,\n self.temp_b,\n self.temp_c_label,\n self.temp_c,\n self.temp_d_label,\n self.temp_d,])\n\n boxSizer.Add(\n sizer, flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_CENTER_HORIZONTAL)\n self.SetSizerAndFit(boxSizer)\n\n\n @run_async(daemon=True)\n def monitor_loop(self):\n while True:\n wx.CallAfter(self.temp_a.SetLabel, \n self.controller.get_temp('a')[1:-2]+'K')\n wx.CallAfter(self.temp_b.SetLabel, \n self.controller.get_temp('b')[1:-2]+'K')\n wx.CallAfter(self.temp_c.SetLabel, \n self.controller.get_temp('c')[1:-2]+'K')\n wx.CallAfter(self.temp_d.SetLabel, \n self.controller.get_temp('d')[1:-2]+'K')\n 
sleep(5)\n","sub_path":"gui/overviewtab/temppanel.py","file_name":"temppanel.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"261378226","text":"import os\nimport scipy.io as sio\nimport numpy as np\nimport h5py\nimport pickle\nfrom operator import truediv\nfrom sklearn.decomposition import PCA\n\n\ndef AA_andEachClassAccuracy(confusion_m):\n    list_diag = np.diag(confusion_m)\n    list_raw_sum = np.sum(confusion_m, axis=1)\n    each_ac = np.nan_to_num(truediv(list_diag, list_raw_sum))\n    average_acc = np.mean(each_ac)\n    return each_ac, average_acc\n\n\ndef applyPCA(Xc, numComponents=75):\n    newX = np.reshape(Xc, (-1, Xc.shape[2]))\n    pcaC = PCA(n_components=numComponents, whiten=True)\n    newX = pcaC.fit_transform(newX)\n    newX = np.reshape(newX, (Xc.shape[0], Xc.shape[1], numComponents))\n    return newX, pcaC\n\n\ndef applyPCAtest(Xc, pcaC, numComponents=75):\n    newX = np.reshape(Xc, (-1, Xc.shape[2]))\n    newX = pcaC.transform(newX)\n    newX = np.reshape(newX, (Xc.shape[0], Xc.shape[1], numComponents))\n    return newX\n\n\ndef load_data(dataset=\"\", ann=False, test=True):\n    \"\"\"Load specified dataset\"\"\"\n    x = None\n    y = None\n\n    if dataset == \"Kochia\":\n        x, y = load_Kochia(ann, test)\n    elif dataset == \"IP\" or dataset == \"PU\" or dataset == \"SA\":\n        x, y = load_HSISAT(dataset)\n        x, pca = applyPCA(x, numComponents=30)\n        x, y = createImageCubes(x, y, window=25)\n    elif dataset == 'EUROSAT':\n        x, y = load_EUROSAT()\n\n    return x, y\n\n\ndef load_Kochia(ann=False, test=True):\n    \"\"\"Load Kochia dataset\"\"\"\n\n    # Download the dataset from https://montana.box.com/s/mhpi7mxlw68abb616v0zl9t03zfwue63\n    # and paste it in the project folder\n\n    hdf5_file = h5py.File('weed_dataset_w25.hdf5', \"r\")\n    train_x = np.array(hdf5_file[\"train_img\"][...])\n    train_y = np.array(hdf5_file[\"train_labels\"][...])\n\n    if ann:\n        # Here, the pre-processing step for KochiaFC is different. See Sec. 5.\n        train_x = train_x[:, 8:18, 8:18, :, :]\n        train_x = np.reshape(train_x, (train_x.shape[0], train_x.shape[1] * train_x.shape[2], train_x.shape[3]))\n        listx = []\n        listy = []\n        for i in range(0, len(train_x)):\n            for j in range(0, train_x.shape[1]):\n                nir = np.mean(train_x[i, j, 175:199])\n                red = np.mean(train_x[i, j, 132:156])\n                ndvi = (nir - red) / (nir + red)\n                if ndvi > 0.6:\n                    listx.append(train_x[i, j, :])\n                    listy.append(train_y[i])\n        train_x = np.array(listx)\n        train_y = np.array(listy)\n\n    else:\n        train_x = np.reshape(train_x, (train_x.shape[0], train_x.shape[1], train_x.shape[2], train_x.shape[3], 1))\n\n    cant = train_x.shape[0]\n    size = train_x.shape[1]\n    nband = train_x.shape[3]\n    print(\"PCA transformation begins\")\n    if test:\n        # Load saved PCA transformation\n        with open('Data/pca_Kochia', 'rb') as f:\n            pca = pickle.load(f)\n        train_x = applyPCAtest(np.reshape(train_x, (cant * size, size, nband)), pca, numComponents=100)\n    else:\n        train_x, pca = applyPCA(np.reshape(train_x, (cant * size, size, nband)), numComponents=100)\n        with open('Data/pca_Kochia', 'wb') as f:\n            pickle.dump(pca, f)\n    # Reshape to its original window shape\n    train_x = np.reshape(train_x, (cant, size, size, 100, 1))\n    train_x = (train_x - np.mean(train_x)) / np.std(train_x)\n\n    return train_x, train_y\n\n\ndef load_EUROSAT():\n    \"\"\"Load EUROSAT dataset\"\"\"\n\n    # The original EUROSAT dataset can be downloaded from https://github.com/phelber/EuroSAT. Alternatively, a\n    # pre-processed ready-to-use dataset that combines all the images in a single \".h5\" file can be downloaded from\n    # https://montana.box.com/s/wqakb91vp3fwe272ctx88n791s4gnqvj\n\n    hdf5_file = h5py.File('EUROSAT.hdf5', \"r\")\n    train_x = np.array(hdf5_file[\"train_img\"][...]).astype(float) / 4000\n    train_x = np.reshape(train_x, (train_x.shape[0], train_x.shape[1], train_x.shape[2], train_x.shape[3]))\n    train_y = np.array(hdf5_file[\"train_labels\"][...])\n    test_x = np.array(hdf5_file[\"test_img\"][...]).astype(float) / 4000\n    test_x = np.reshape(test_x, (test_x.shape[0], test_x.shape[1], test_x.shape[2], test_x.shape[3]))\n    test_y = np.array(hdf5_file[\"test_labels\"][...])\n\n    train_x = np.concatenate((train_x, test_x))\n    train_y = np.concatenate((train_y, test_y))\n\n    return train_x, train_y\n\n\ndef load_HSISAT(name):\n    data_path = os.path.join(os.getcwd(), 'Data')\n    if name == 'IP':\n        dat = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']\n        label = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']\n        return dat, label\n    elif name == 'SA':\n        dat = sio.loadmat(os.path.join(data_path, 'Salinas_corrected.mat'))['salinas_corrected']\n        label = sio.loadmat(os.path.join(data_path, 'Salinas_gt.mat'))['salinas_gt']\n        return dat, label\n    elif name == 'PU':\n        dat = sio.loadmat(os.path.join(data_path, 'PaviaU.mat'))['paviaU']\n        label = sio.loadmat(os.path.join(data_path, 'PaviaU_gt.mat'))['paviaU_gt']\n        return dat, label\n\n\ndef padWithZeros(Xc, margin=2):\n    newX = np.zeros((Xc.shape[0] + 2 * margin, Xc.shape[1] + 2 * margin, Xc.shape[2]))\n    x_offset = margin\n    y_offset = margin\n    newX[x_offset:Xc.shape[0] + x_offset, y_offset:Xc.shape[1] + y_offset, :] = Xc\n    return newX\n\n\ndef createImageCubes(Xc, yc, window=5, removeZeroLabels=True):\n    margin = int((window - 1) / 2)\n    zeroPaddedX = padWithZeros(Xc, margin=margin)\n    # split patches\n    patchesData = np.zeros((Xc.shape[0] * Xc.shape[1], window, window, Xc.shape[2]))\n    patchesLabels = np.zeros((Xc.shape[0] * Xc.shape[1]))\n    patchIndex = 0\n    for r in range(margin, zeroPaddedX.shape[0] - margin):\n        for c in range(margin, zeroPaddedX.shape[1] - margin):\n            patch = zeroPaddedX[r - margin:r + margin + 1, c - margin:c + margin + 1]\n            patchesData[patchIndex, :, :, :] = patch\n            patchesLabels[patchIndex] = yc[r - margin, c - margin]\n            patchIndex = patchIndex + 1\n    if removeZeroLabels:\n        patchesData = patchesData[patchesLabels > 0, :, :, :]\n        patchesLabels = patchesLabels[patchesLabels > 0]\n        patchesLabels -= 1\n    return patchesData, patchesLabels\n
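\n\n# (added) Hedged shape check for createImageCubes on random toy data, kept as\n# comments so importing this module stays side-effect free:\n# Xr = np.random.rand(5, 5, 8)\n# yr = np.random.randint(0, 3, (5, 5))\n# cubes, labels = createImageCubes(Xr, yr, window=3, removeZeroLabels=False)\n# cubes.shape == (25, 3, 3, 8) and labels.shape == (25,)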
\n\ndef Patch(data, height_index, width_index, windowSize):\n    height_slice = slice(height_index, height_index + windowSize)\n    width_slice = slice(width_index, width_index + windowSize)\n    patch = data[height_slice, width_slice, :]\n\n    return patch","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"56218359","text":" \nfrom django.urls import include , path\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.views import LoginView , LogoutView , logout_then_login\n\nfrom . 
import views\n\n\napp_name = 'Users'\n# NOTE: the URL converters below were lost to markup stripping in this copy;\n# <int:pk> is a reconstruction and may not match the original parameter name\nurlpatterns = [\n path('' , LoginView.as_view( template_name='users/login.html', ) , name = 'Login'),\n path('register/' , views.registration_view , name = 'register'),\n path('logout/' , login_required(LogoutView.as_view()) , name = 'logout'),\n path('<int:pk>/edituser/' , views.editUser , name = 'edituser'),\n\n path('profile/order/' , login_required(views.order_view) , name = 'order'),\n path('profile/' , login_required(views.profile) , name = 'profile'),\n\n path('<int:pk>/delete/' , login_required(views.delete) , name = 'delete'),\n path('profile/<int:pk>/del/' , login_required(views.Pre_del.as_view()) , name = 'pre_delete'),\n path('profile/<int:pk>/edit/' , login_required(views.edit) , name = 'edit'),\n]\n","sub_path":"userss/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"250400209","text":"from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse_lazy\nfrom .forms import TalkSubmissionForm\nfrom .models import Session, Talk\n\nNOT_IMPLEMENTED_MSG = 'This view is not implemented. Please tell the Podium ' \\\n 'maintainers to fix this.'\n\n\ndef submit_talk_view(request):\n form = TalkSubmissionForm(request.POST or None)\n if form.is_valid():\n form.save()\n form = TalkSubmissionForm()\n return HttpResponseRedirect(reverse_lazy('talks-sessions'))\n return render(request, 'talks/submit.html', {\n 'form': form,\n })\n\n\ndef talk_detail_view(request, talk_id):\n talk = get_object_or_404(Talk, id=talk_id)\n context = {\n 'talk': talk,\n }\n return render(request, 'talks/talk_detail.html', context)\n\n\ndef talk_list_view(request):\n talks = Talk.objects.all()\n context = {\n 'talks': talks,\n }\n return render(request, 'talks/talks.html', context)\n\n\ndef session_list_view(request):\n sessions = Session.objects.all()\n context = {'sessions': sessions}\n return render(request, 'talks/sessions.html', context)\n\n\ndef session_talk_list_view(request, id):\n session = get_object_or_404(Session, id=id)\n return render(request, 'talks/session-detail.html', {\n 'session': session,\n 'talks': session.talks_available.all(),\n })\n","sub_path":"podium/talks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"590678907","text":"import os\nimport glob\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\nimport numpy as np\nfrom spt3g import core, gcp\n\ndef obsid_to_g3time(obsid_in, verbose=True):\n '''\n Blatantly stolen from TC\n '''\n obsid = int(obsid_in)\n mjd0 = core.G3Time('01-Jan-2017:00:00:00').mjd\n time_out = core.G3Time()\n time_out.mjd = mjd0 + obsid/86400.\n return time_out\n\nclass ObsIdFinder(object):\n '''\n Spool through an arcfile to find the start, stop and schedule name for each \n obsid in the file. 
Partial observations or observations in which the\n schedule name changes are ignored.\n\n RETURNS\n -------\n A dictionary mapping observation ids to dictionaries containing\n 'start': the start time of the obsid\n 'stop': the end time of the obsid\n 'sched': the name of the schedule\n Note that start and stop times are for reference only.\n They should be accurate to +/- 1 s.\n '''\n def __init__(self):\n self.lut = dict()\n self.id_data = dict()\n self.last_id = -1\n self.all_keys = ['start', 'stop', 'sched']\n\n def __call__(self, fr):\n if fr.type != core.G3FrameType.GcpSlow:\n return\n try:\n id = fr['antenna0']['tracker']['obs_id'].value\n except KeyError:\n return\n if id != self.last_id:\n self.id_data['stop'] = fr['antenna0']['tracker']['utc'][0][0]\n # obsid has changed, so put it in the master dict,\n # as long as all the keys have been filled\n good_id = True\n for key in self.all_keys:\n if key not in self.id_data:\n good_id = False\n break\n if good_id:\n self.lut[self.last_id] = self.id_data\n # now set up the new dict\n self.last_id = id\n self.id_data = dict()\n self.id_data['start'] = fr['antenna0']['tracker']['utc'][0][0]\n self.id_data['sched'] = fr['antenna0']['tracker']['schedule_name'].value\n # if we started part way through an obs, obsid will not match the \n # start time.\n if obsid_to_g3time(id).time - self.id_data['start'].time > 2 * core.G3Units.s:\n self.id_data.pop('sched')\n else:\n if 'sched' not in self.id_data:\n # this obs id was previously ignored\n return\n if self.id_data['sched'] != fr['antenna0']['tracker']['schedule_name'].value:\n # this isn't a valid observation, \n # so ignore it by unsetting 'sched'\n self.id_data.pop('sched')\n\ndef get_obsids_from_arc(arcfile):\n finder = ObsIdFinder()\n pipe = core.G3Pipeline()\n if len(arcfile) == 0:\n print('No arcfiles found')\n return dict()\n pipe.Add(gcp.ARCFileReader, filename = arcfile)\n pipe.Add(finder)\n pipe.Run()\n return finder.lut\n\ndef find_recent_arcs(time, arcdir):\n '''\n Find the last arcfile before `time`\n '''\n files = sorted(glob.glob(os.path.join(arcdir, '*.dat')))\n last_file = 0\n for f in files:\n # str.strip removes a character set, not a suffix, so slice it off instead\n timestr = os.path.basename(f)[:-len('.dat')]\n file_time = core.G3Time(timestr)\n if file_time > time:\n break\n last_file += 1\n return files[last_file:]\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(\n description=\"Create a look up table for observation IDs from arcfiles\")\n parser.add_argument('--output-file', '-o', type = str, \n default = '/spt/users/ndhuang/public/odsid_lut.pkl',\n help = 'The output file in which to store the look up table')\n parser.add_argument('--overwrite', action = 'store_true',\n help = 'Overwrite the output file, instead of updating it')\n parser.add_argument('--arcdir', type = str,\n default = '/spt/data/arc',\n help = 'The directory containing arcfiles')\n args = parser.parse_args()\n if not args.overwrite and os.path.exists(args.output_file) and \\\n os.path.getsize(args.output_file) > 0:\n f = open(args.output_file, mode = 'rb')\n lut = pickle.load(f)\n f.close()\n if len(lut.keys()) == 0:\n arcfiles = find_recent_arcs(core.G3Time('20170125_000000'), \n args.arcdir)\n else:\n last_id = max(lut.keys())\n arcfiles = find_recent_arcs(lut[last_id]['stop'], args.arcdir)\n else:\n lut = dict()\n # Make sure we don't accidentally get spt{pol,sz} arcfiles\n arcfiles = find_recent_arcs(core.G3Time('20170125_000000'), \n args.arcdir)\n lut.update(get_obsids_from_arc(arcfiles))\n 
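# protocol 2 keeps the pickle loadable from Python 2 consumers as well\n f = open(args.output_file, mode = 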
'wb')\n pickle.dump(lut, f, protocol = 2)\n f.close()\n","sub_path":"scratch/ndhuang/obsid_lut.py","file_name":"obsid_lut.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"453331598","text":"from PipelineAttributes import stages\nimport time\nimport sys\n\n__author__ = 'Nima Pakseresht, Blaise Alako'\n\nimport cx_Oracle\n\n# module-level error accumulator used by the helper functions below\n# (it was referenced but never defined in this file)\nerror_list = []\n\nclass Oracle(object):\n\n def connect(self, username, password, hostname, port, servicename):\n \"\"\" Connect to the database. \"\"\"\n\n try:\n self.db = cx_Oracle.connect(username, password\n , hostname + ':' + port + '/' + servicename)\n except cx_Oracle.DatabaseError as e:\n # Log error as appropriate\n raise\n\n # If the database connection succeeded create the cursor\n # we're going to use.\n self.cursor = self.db.cursor()\n\n def disconnect(self):\n \"\"\"\n Disconnect from the database. If this fails, for instance\n if the connection instance doesn't exist, ignore the exception.\n \"\"\"\n\n try:\n self.cursor.close()\n self.db.close()\n except cx_Oracle.DatabaseError:\n pass\n\n def execute(self, sql, bindvars=None, commit=False):\n \"\"\"\n Execute whatever SQL statements are passed to the method;\n commit if specified. Do not specify fetchall() in here as\n the SQL statement may not be a select.\n bindvars is a dictionary of variables you pass to execute.\n \"\"\"\n\n try:\n self.cursor.execute(sql, bindvars)\n except cx_Oracle.DatabaseError as e:\n # Log error as appropriate\n raise\n\n # Only commit if it's necessary.\n if commit:\n self.db.commit()\n\n\ndef get_process_id(id):\n time.sleep(2)\n return id + \"-\" + str(time.strftime(\"%d%m%Y%H%M%S\"))\n\n\ndef set_started(conn, selection_id):\n query = \"update process_selection set selection_to_attribute_start=NOW() where selection_id={}\".format(selection_id)\n cursor = conn.cursor()\n try:\n cursor.execute(query)\n conn.commit()\n except:\n print(\"ERROR: Cannot update process_stages set stage_start=NOW():\", file=sys.stderr)\n message = str(sys.exc_info()[1])\n error_list.append(message)\n print(\"Exception: {}\".format(message), file=sys.stderr)\n conn.rollback()\n\n\ndef set_finished(conn, selection_id):\n query = \"update process_selection set selection_to_attribute_end=NOW() where selection_id={}\".format(selection_id)\n cursor = conn.cursor()\n try:\n cursor.execute(query)\n conn.commit()\n except:\n print(\"ERROR: Cannot update process_stages set stage_end=NOW():\", file=sys.stderr)\n message = str(sys.exc_info()[1])\n error_list.append(message)\n print(\"Exception: {}\".format(message), file=sys.stderr)\n conn.rollback()\n\n\ndef insert_default_stages(conn, process_id, selection_id):\n stage_list = [stages.data_provider_stage_name, stages.core_executor_stage_name, stages.analysis_reporter_stage_name,\n stages.process_archival_stage_name]\n print('*' * 100)\n print(process_id, selection_id, stage_list)\n print('*' * 100)\n print(process_id, selection_id, stage_list, file=sys.stdout)\n default_stage = stages(process_id, selection_id, stage_list)\n default_stage.insert_all_into_process_stages(conn)\n\n\ndef process_report_set_started(conn, info):\n \"\"\"info is a dict with the following:\n\t study_id, datahub, run_id,process_id, selection_id, start_time\n\t\"\"\"\n study_accession = info['study_accession']\n run_accession = info['run_accession']\n datahub = info['datahub']\n process_id = info['process_id']\n selection_id = info['selection_id']\n 
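# building SQL via str.format is injection-prone; parameterised queries would be safer here\n query = \"INSERT INTO process_report 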
(study_accession,datahub,run_accession,process_id,selection_id,process_report_start_time) values('{}','{}','{}','{}','{}',now())\".format(\n study_accession, datahub, run_accession, process_id, selection_id)\n print('*' * 100)\n print(\"PROCESS_REPORT QUERY:\\n\\t{}\".format(query), \"\\n\", sep=\"\")\n print('*' * 100)\n cursor = conn.cursor()\n try:\n cursor.execute(query)\n conn.commit()\n except:\n print(\n \"Error: Can not INSERT study:{} datahub:{} process_id:{} selection_id:{} run:{} in process_report \".format(\n study_accession, datahub, process_id, selection_id, run_accession), file=sys.stderr)\n traceb, message, tb = sys.exc_info()\n error_list.append(message)\n print(\"Exception: exc_info[0]:{}, exc_info[1]:{} , exc_info[2] \".format(traceb, message, tb), file=sys.stderr)\n conn.rollback()\n cursor.execute('show profiles')\n for row in cursor:\n print(row)\n\n\n\ndef already_ran_runs(conn, selection_id):\n \"\"\" Get previously processed run accessions from the Process\n report table ....\n \"\"\"\n query =\"Select distinct run_accession from process_report where process_report_start_time is not null and selection_id ={}\".format(selection_id)\n cursor = conn.cursor()\n ran_accessions = []\n try:\n cursor.execute(query)\n ran_accessions = [run[0] for run in cursor]\n except:\n message = str(sys.exc_info()[1])\n error_list.append(message)\n print(\"Exception: {}\".format(message), file=sys.stderr)\n conn.rollback()\n return ran_accessions\n\n\nclass Process_report:\n def __init__(self, select, attr, error_list):\n self.select = select\n self.attr = attr\n self.error_list = error_list\n self.continuity = select.continuity\n self.selection_to_attribute_end = select.selection_to_attribute_end\n self.attr.selection_id = select.selection_id\n self.attr.datahub = select.datahub\n self.attr.pipeline_name = select.pipeline_name\n self.attr.public = select.public\n self.attr.analyst_webin_id = select.analyst_webin_id\n self.attr.process_id = get_process_id(attr.run_accession)\n\n\n def log_process_report_info(self, conn):\n self.info = dict()\n self.conn = conn\n\n \"\"\" We need to update process_report with study_id, datahub, run_id,process_id, selection_id, start_time \"\"\"\n self.info['study_accession'] = self.attr.study_accession\n self.info['datahub'] = self.select.datahub\n self.info['run_accession'] = self.attr.run_accession\n self.info['process_id'] = self.attr.process_id\n self.info['selection_id'] = self.select.selection_id\n print('=' * 100)\n print(\"Process report info:\")\n print(self.info)\n print('=' * 100)\n\n \"\"\" GET_PROCESS_ID takes a run id and appends to it the current date and time \"\"\"\n \"\"\" Continuity is NO \"\"\"\n\n self.attr.insert_all_into_process_stages(self.conn)\n\n \"\"\" INSERT_ALL_INTO_PROCESS_STAGE call on INSERT_INTO_PROCESS_STAGE process_stages\n (process_id, selection_id, stage_name)\n \"\"\"\n insert_default_stages(self.conn, self.attr.process_id, self.attr.selection_id)\n\n \"\"\" calls insert_all_into_process_stages to insert stage_list into process_stages\n stage_list = [stages.data_provider_stage_name, stages.core_executor_stage_name,\n stages.analysis_reporter_stage_name,stages.process_archival_stage_name] \n Update process_report table \n \"\"\"\n \"\"\" Update process_report table \"\"\"\n process_report_set_started(self.conn, self.info)\n\n if len(self.error_list) != 0:\n self.final_errors = ' '.join(str(v).replace(\"'\", \"\") for v in self.error_list)\n set_error(self.conn, self.select.selection_id, self.final_errors.replace(\"'\", \"\"))\n 
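# NOTE: set_error is not defined in this module as shown; presumably it mirrors\n # set_finished and records the concatenated errors on process_selection\n 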
else:\n set_finished(self.conn, self.select.selection_id)\n \"\"\"SET_FINISHED: updates process_selection by \n setting selection_to_attribute_end\n \"\"\"\n self.error_list = list()\n","sub_path":"scripts/reporting.py","file_name":"reporting.py","file_ext":"py","file_size_in_byte":7713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"303596652","text":"# analyse_new.py\r\n# This code checks the efficiency of a trigger on SN events \r\n\r\n\r\n# import the module\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np;\r\nimport pandas as pd;\r\nimport allFunctions\r\n\r\n# Global variables\r\nresolution = 0.05 # 50 nanoseconds in microseconds\r\nSN_event_time = 10 # Over 80% of SN signal is recorded within 0.1 microseconds\r\n# Usual number of bins in which a SN is recorded\r\nSN_event_nr_bins = int(SN_event_time/resolution)\r\ntrigDuration = 10. #microseconds\r\n\r\n\r\n# open the files\r\nreadFileSN = open('time_SN.txt','r')\r\nreadFileAr39 = open('time_Ar39.txt','r')\r\nreadFileEvents = open('events_SN.txt','r')\r\nsimulationTime = 2.5 * 1000000 #2.5 seconds in microseconds\r\n\r\n\r\n# create empty arrays\r\ntimeSN = [];\r\ntimeAr = [];\r\ntime = [];\r\neventsSN = [];\r\n\r\n# read the SN file\r\nfor line in readFileSN:\r\n # split the input line based on a comma\r\n splitUp = line.split(\",\");\r\n # The lines of interest have plenty of data\r\n for i in range(0,len(splitUp)):\r\n try:\r\n timeSN.append(float(splitUp[i].replace(' ','')))\r\n time.append(timeSN[-1]); # use the value just appended\r\n except:\r\n print(splitUp[i])\r\n\r\n# read the Ar39 file\r\nfor line in readFileAr39:\r\n # split the input line based on a comma\r\n splitUp = line.split(\",\");\r\n # The lines of interest have plenty of data\r\n for i in range(0,len(splitUp)):\r\n try:\r\n timeAr.append(float(splitUp[i].replace(' ','')))\r\n time.append(timeAr[-1]) # use the value just appended\r\n except:\r\n print(splitUp[i])\r\n \r\n \r\n\r\n# read the SN events file\r\nlineNr = 0\r\nfor line in readFileEvents:\r\n # split the input line based on a comma\r\n splitUp = line.split(\",\");\r\n # Create list of arrays\r\n eventsSN.append([])\r\n for i in range(0,len(splitUp)):\r\n try:\r\n eventsSN[lineNr].append(float(splitUp[i].replace(' ','')))\r\n except Exception as ex:\r\n print(ex)\r\n print(splitUp[i])\r\n lineNr = lineNr + 1\r\n\r\neventsSN = pd.DataFrame(eventsSN, columns = ['energy','distanceToAnode','eventTime'])\r\n# eventTime wrongly saved in seconds. 
Convert to micro seconds (x1e6)\r\neventsSN.loc[:,'eventTime'] *= 1000000\r\n\r\n# Sort all arrays of time\r\ntime = np.sort(time)\r\ntimeSN = np.sort(timeSN)\r\ntimeAr = np.sort(timeAr) \r\neventsSN = eventsSN.sort_values('eventTime')\r\n\r\n# Divide data into \"steps\" given by the resolution\r\nevents = allFunctions.DivideDataByRes(resolution,time,simulationTime)\r\n\r\n\"\"\"\r\nfor i in range(0,len(eventsSN)):\r\n eventsSN['arrivalTime'][i] = np.array\r\n\r\nfor i in range(0,len(eventsSN)-1):\r\n j = 0\r\n while(timeSN[j]0)/len(SNCandidates)\r\nfakeRate = len(fakeTrig) / (simulationTime/1000000)\r\nprint(trigEf)\r\nprint(fakeRate)\r\n# Add SNTrig to eventsSN\r\neventsSN['TriggerResponse'] = SNCandidates.tolist()\r\n\"\"\"\r\n\r\n\r\n\r\n# Grid search to find optimum threshold and SN_event_time\r\nthresholdVals = [5,7,10,13,15]\r\nSN_event_timeVals = [0.5,2,5,10,15,20,25,35]\r\n\r\n[df_eff, df_fake] = allFunctions.GridSearch(events, thresholdVals, SN_event_timeVals, eventsSN, trigDuration,resolution)\r\n\r\nX, Y = np.meshgrid(list(map(float, df_eff.index)),list(map(float, df_eff.columns)))\r\n\r\nallFunctions.PlotGridSearch(df_eff,efficiency=True)\r\nallFunctions.PlotGridSearch(df_fake,efficiency=False)\r\n\r\ndf_eff.to_csv('efficiencies.csv', sep=',')\r\ndf_fake.to_csv('fakeEvents.csv', sep=',')\r\n\r\n#allFunctions.Plot_Trigger_Distrib(eventsSN,trigEf,fakeRate,threshold,SN_event_nr_bins,mean_events,SN_event_time)\r\n\r\n# allFunctions.SVMClass(eventsSN)\r\n\r\n\r\n","sub_path":"week6/analyse_integratedTrig_all.py","file_name":"analyse_integratedTrig_all.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"121285245","text":"#\n# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)\n# Ivannikov Institute for System Programming of the Russian Academy of Sciences\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom core.pfg.abstractions.strategies import Abstract\n\n\nclass Callgraph(Abstract):\n \"\"\"This strategy gets a target fragment and recursively adds fragments which are used by this one fragment.\"\"\"\n\n def _make_groups(self):\n \"\"\"\n Just return target fragments as aggregations consisting of fragments that are required by a target one,\n collecting required fragments up to the given depth.\n\n :return: {GroupName: Set of Fragments}.\n \"\"\"\n # First we need fragments that are completely fulfilled\n max_deep = self.fragmentation_set_conf.get('dependencies recursive depth', 3)\n max_size = self.fragmentation_set_conf.get('maximum files')\n for fragment in self.program.target_fragments:\n name = fragment.name\n files = self.program.collect_dependencies(fragment.files, depth=max_deep, max=max_size)\n fragments = self.program.get_fragments_with_files(files)\n fragments.add(fragment)\n self.add_group(name, 
fragments)\n","sub_path":"core/core/pfg/abstractions/strategies/callgraph.py","file_name":"callgraph.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"478730625","text":"#!/usr/bin/env python3\n\n\"\"\"\nPretty prints an XML file.\n\"\"\"\n\nimport sys, os, re\nfrom lxml import etree\n\ndef formatXML(xml,parser):\n\n\txmldoc = etree.parse(xml, parser)\n\tos.remove(xml)\n\txmldoc.write(xml, encoding=\"utf-8\", pretty_print=True)\n\ndef main(args=None):\n\n\txml = sys.argv[1]\n\tparser = etree.XMLParser(remove_blank_text=True)\n\tformatXML(xml,parser)\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"met/scripts/xmlFormatter.py","file_name":"xmlFormatter.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"68744097","text":"# alternative provided by TA 李旺陽\nimport heapq\ndef KthElements(k, n, a):\n # Write your code here\n a = [-x for x in a]\n heap = a[0:k]\n\n heapq.heapify(heap)\n ans = [-heap[0]]\n\n for i in a[k:]:\n heapq.heappushpop(heap, i)\n ans.append(-heap[0])\n\n return ans\n","sub_path":"exam/01/home/the_kth_minimum_element.py","file_name":"the_kth_minimum_element.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"171314790","text":"import numpy as np\nimport scipy as sci\nimport gym\n\ndef get_state_probs(n_states, n_arms):\n state_probs = np.zeros((n_states, n_arms))\n for s in range(n_states):\n state_probs[s, :] = np.random.uniform(0.0, 0.4)\n best_arm = np.random.randint(n_arms)\n state_probs[s, best_arm] = 0.9\n \n return state_probs\nclass ContextualBandit(gym.Env):\n\n def __init__(self, config):\n self.n_arms = config[\"num_arms\"]\n self.n_states = config[\"num_states\"]\n \n # nS x nArms matrix\n self.probs = config[\"state_probs\"]\n\n assert self.probs.shape == (self.n_states, self.n_arms) \n\n # Best arms in each state\n self.best_probs = np.max(self.probs, axis=1)\n \n self.cumulative_reward = np.zeros((self.n_states, self.n_arms))\n self.visits = np.zeros((self.n_states, self.n_arms))\n self.regret = [0.]\n self.state = np.random.randint(self.n_states)\n \n def step(self, action):\n assert action < self.n_arms and action >= 0\n \n self.visits[self.state, action] += 1\n self.regret.append(self.best_probs[self.state] - self.probs[self.state, action])\n \n if np.random.random() < self.probs[self.state, action]:\n self.cumulative_reward[self.state, action] += 1\n self.state = np.random.randint(self.n_states)\n return 1, self.state\n\n # next state\n self.state = np.random.randint(self.n_states)\n return 0, self.state\n\n def reset(self):\n self.cumulative_reward.fill(0)\n self.regret = [0.]\n self.visits.fill(0) \n \n self.state = np.random.randint(self.n_states)\n return self.state\n\n def arm_avgs(self, state):\n return self.cumulative_reward[state, :] / (self.visits[state, :] + 0.00001)\n\n def seed(self, seed):\n np.random.seed(seed)\n \n def get_regret(self):\n return self.regret\n","sub_path":"bandits_and_mcts/envs/context_bandit.py","file_name":"context_bandit.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"523714329","text":"from rpyc import Service\nfrom rpyc.utils.server import ThreadedServer\nimport os\n\n\nclass USBRelayService(Service):\n\n def 
exposed_relay_control(self, replay: str = \"01\", on_off: str = \"ON\") -> int:\n os.chdir(\"D:/Elden/USBRelay/USBRelay外部使用开发库/TestApp\") # enter the path of CommandApp_USBRelay.exe\n result = -1 # -1 usually means the arguments were wrong and none of the branches below ran\n if on_off.lower() == \"on\":\n # open relay 0x\n # return value: 0 -- success; 1 -- error; 2 -- index exceeds the number of usb relay devices\n result = os.system(\"CommandApp_USBRelay BITFT open {0}\".format(replay))\n if on_off.lower() == \"off\":\n result = os.system(\"CommandApp_USBRelay BITFT close {0}\".format(replay)) # close relay 01\n return result\n\n\nif __name__ == '__main__':\n # run this service in a terminal first, then access it with an rpyc client\n s = ThreadedServer(USBRelayService, port=9999, auto_register=False)\n s.start()\n","sub_path":"Python-Library/rpyc/usb_control_server_test.py","file_name":"usb_control_server_test.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"98224185","text":"# Read results files and create an error graph\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport ast\nfrom collections import Counter\n\nmode = 5\nminSh = 2\nmaxSh = 8 # put 1 more than you need\nminInput = 2\nmaxInput = 3 # put 1 more than you need\nshardList = [2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n#Clever\nfilename = \"Clever/clever\"\ntpsMean = []\ntpsStd = []\nLatMean = []\nLatStd = []\nallTps = [[] for i in range(10)]\nfor i in shardList:\n\tfile = filename+str(i)+\".txt\"\n\tf = open(file, \"r\")\n\tcount = 0\n\tprint(i)\n\tfor line in f:\n\t\tif count <=5:\n\t\t\tif count <5:\n\t\t\t\tline = line.split(\" \")\n\t\t\tif count != 4:\n\t\t\t\tx = ast.literal_eval(line[1])\n\n\n\t\t\tif count == 0: tpsMean.append(x[0]*i) \n\t\t\telif count == 1: tpsStd.append(float(line[1]))\n\t\t\telif count == 2: LatMean.append(x[0])\n\t\t\telif count == 3: LatStd.append(float(line[1]))\n\t\t\telif count == 4: pass\n\t\t\telif count == 5: \n\t\t\t\tprint(ast.literal_eval(line))\n\t\t\t\tallTps[i-1].append(ast.literal_eval(line))\n\t\t\tcount += 1\n\n### Random ###\nfilename2 = \"Random/random\"\ntpsMeanRand = []\ntpsStdRand = []\nLatMeanRand = []\nLatStdRand = []\nallTpsRand = [[] for i in range(10)]\n\nfor i in shardList:\n\tfile = filename2+str(i)+\".txt\"\n\tf = open(file, \"r\")\n\tcount = 0\n\tfor line in f:\n\t\tif count <=5:\n\t\t\tif count <5:\n\t\t\t\tline = line.split(\" \")\n\t\t\tif count != 4:\n\t\t\t\tx = ast.literal_eval(line[1])\n\n\n\t\t\tif count == 0: tpsMeanRand.append(x[0]*i) \n\t\t\telif count == 1: tpsStdRand.append(float(line[1]))\n\t\t\telif count == 2: LatMeanRand.append(x[0])\n\t\t\telif count == 3: LatStdRand.append(float(line[1]))\n\t\t\telif count == 4: pass\n\t\t\telif count == 5: \n\t\t\t\t#print(x)\n\t\t\t\tallTpsRand[i-1].append(ast.literal_eval(line))\n\t\t\tcount += 1\n\n\n\n\n#### fullcross ####\n\nasymetricerrorStd = [tpsStd, tpsStd]\n\nx = shardList\ny = tpsMean\nfig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)\nax0.errorbar(x, y, yerr=asymetricerrorStd, fmt='-o', label = \"DAVIS\")\nax0.set_ylabel(\"Tps\")\nax0.set_title(\"Evolution of the tps and latency in function of the nbr of shards\")\nax0.grid(True, which = \"both\")\n\n\ny = LatMean\nasymetricerrorLatency = [LatStd, LatStd]\nax1.errorbar(x, y, yerr=asymetricerrorLatency, fmt='-o', label = \"DAVIS\" )\nax1.set_ylabel(\"Latency[ms]\")\nax1.set_xlabel(\"Number of shards\")\nax1.grid(True, which = \"both\")\n\n#### nocross ####\n\nasymetricerrorStdRand = [tpsStdRand, tpsStdRand]\nx = [elem+0.05 
for elem in x]\ny = tpsMeanRand\n\nax0.errorbar(x, y, yerr=asymetricerrorStdRand, fmt='-x', label = \"Random\")\n\ny = LatMeanRand\nasymetricerrorLatencyRand = [LatStdRand, LatStdRand]\nax1.errorbar(x, y, yerr=asymetricerrorLatencyRand, fmt='-x', label = \"Random\")\n\nax0.legend()\nax1.legend()\n\nplt.savefig(\"Graph_Clever_Random.png\")\n\n# plt.show()\n\n\n\n\nrows = len(shardList)\nfig, ax = plt.subplots(rows, \n\t\t\t\t\t sharex='col',figsize=(10,10))\n\t\t\t\t\t #sharey='row')\nax[0].set_title(\"Distribution of the tps in function of the number of shard used\")\n\nfor row in range(rows):\n\tnbr = len(allTps[shardList[row]-1][0])\n\ty = [0]*nbr\n\tax[row].scatter(allTps[shardList[row]-1][0], y, label = 'DAVIS')\n\tax[row].set_ylabel('shard:'+str(shardList[row]))\n\n\t\n\tnbr = len(allTpsRand[shardList[row]-1][0])\n\ty = [0.5]*nbr\t\n\tax[row].scatter(allTpsRand[shardList[row]-1][0], y, label = 'random')\n\tax[row].legend()\nplt.xlabel(\"Tps\")\nplt.savefig(\"Graph_distrib_Clever_Random.png\")\n\n\n\n###########################\n\"\"\"\n100 blocks\nClever\nedges || trans\n2 15.01 28.30\n3 18.46\t 35.41\n4 20.47 38.87\n5 18.97 41.52\n6 21.92 42.94\n7 22.51 44.08\n8 26.21 44.70\n9 24.33 45.67\n10 26.86 46.71\n\n\n\nrandom\n2 49.818 59.79\n3 66.23 66.23\n4 75.14 75.14\n5 80.37 85.39\n6 83.37 88.09\n7 84.93 89.74\n8 87.93 90.91\n9 88.59 92.12\n10 90.14 92.94\n\"\"\"\n\ncrossEdge = [15.01, 18.46, 20.47, 18.97, 21.92, 22.51, 26.21, 24.33, 26.86]\ncrossTran = [28.30, 35.41, 38.87, 41.52, 42.94, 44.08, 44.70, 45.67, 46.71]\n\nrcrossEdge = [49.81, 66.23, 75.14, 80.37, 83.37, 84.93, 87.93, 88.59, 90.14]\nrcrossTran = [59.79, 66.23, 75.14, 85.39, 88.09, 89.74, 90.91, 92.12, 92.94]\n\nfig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True)\nax0.plot(shardList, crossTran, label = \"DAVIS cross trans\")\nax0.plot(shardList, rcrossTran, label = \"Random cross trans\")\n\nax0.set_ylabel(\"Percentage [%]\")\nax0.set_title(\"Evolution of the Percentage of cross transactions\\n and cross edges in function of the nbr of shards\")\nax0.grid(True, which = \"both\")\n\n\nax1.plot(shardList, crossEdge, label = \"DAVIS cross edges\" )\nax1.plot(shardList, rcrossEdge, label = \"Random cross edges\" )\n\nax1.set_ylabel(\"Percentage [%]\")\nax1.set_xlabel(\"Number of shards\")\nax1.grid(True, which = \"both\")\n\nax0.legend()\nax1.legend()\n\nplt.savefig(\"Graph_Clever_Random_percentage.png\")\n","sub_path":"chainspacemeasurements/chainspacemeasurements/Resultats/range_300k_300100/mode5/fakeGraph.py","file_name":"fakeGraph.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"65670079","text":"from openpyxl import load_workbook\nfrom PySide import QtGui, QtCore\nimport sys\n#import RPi.GPIO as GPIO\nimport time\ntick=1\nmovement_map = {\n\t\t\t\"Start\": 11,\n\t\t\t\"Left\": 13,\n\t\t\t\"Right\": 15,\n\t\t\t\"Down\": 16,\n\t\t\t\"Up\": 18,\n\t\t\t\"Jump\": 22,\n\t\t\t\"Spin\": 29,\n\t\t\t\"Run\": 31\n\t\t\t}\n\t\t\t\n\nclass Window(QtGui.QMainWindow):\n\n\tdef __init__(self):\n\t\tsuper(Window, self).__init__()\n\t\tself.setGeometry(50, 50, 500, 300)\n\t\tself.setWindowTitle(\"WinMarioMaker!\")\n\t\tself.setWindowIcon(QtGui.QIcon('pythonlogo.png'))\n\t\tself.home()\n\n\tdef home(self):\n\t\tbtn = QtGui.QPushButton(\"Quit\", self)\n\t\tbtn2 = QtGui.QPushButton(\"Start\", self)\n\t\tbtn3 = QtGui.QPushButton(\"Read Excel\", 
self)\n\n\n\t\tbtn.clicked.connect(QtCore.QCoreApplication.instance().quit)\n\t\tbtn.resize(100,100)\n\t\tbtn.move(0,0)\n\n\t\tbtn2.clicked.connect(self.readExcel)\n\t\tbtn2.resize(100,100)\n\t\tbtn2.move(0,100)\n\n\t\tbtn3.clicked.connect(self.readExcel)\n\t\tbtn3.move(100,0)\n\n\n\t\tself.show()\n\n\tdef startGame(self):\n\t\tprint(\"startGame!!!\")\n\t\tself.readExcel()\n\n\tdef readExcel(self):\n\t\t\n\t\twb = load_workbook('Moves.xlsx')#, read_only=True)\n\t\ttype(wb)\n\t\tsource = wb.active\n\n\t\tprint('| S | L | R | D | U | J | S | R |')\n\t\tfor row in source.iter_rows(min_row=2):\n\t\t\tval = '| {0} | {1} | {2} | {3} | {4} | {5} | {6} | {7} |'\n\t\t\tvalues = [0, 0, 0, 0, 0, 0, 0, 0]\n\t\t\tfor col in xrange(len(row)):\n\t\t\t\tvalue = row[col].value\n\t\t\t\tif value:\n\t\t\t\t\tvalues[col] = int(row[col].value)\n\t\t\t\telse:\n\t\t\t\t\tvalues[col] = 0\n\t\t\tprint(val.format(*values))\n\t\t\tself.pressButtons(values)\n\t\t\ttime.sleep(tick)\n\tdef pressButtons(self, values):\n\t\tpins = []\n\t\tfor i in xrange(len(values)):\n\t\t\tpin = movement_map[movement_map.keys()[i]]\n\t\t\tif values[i] == 1:\n\t\t\t\t#print \"on\"\n\t\t\t\t#GPIO.output(pin, GPIO.HIGH)\n\t\t\t\tvar = 0\n\t\t\telse:\n\t\t\t\t#print \"on\"\n\t\t\t\t#GPIO.output(pin, GPIO.HIGH)\n\t\t\t\tvar = 0\n\t\tprint(\"PRESSED BUTTONS\")\ndef run():\n\tapp = QtGui.QApplication(sys.argv)\n\tGUI = Window()\n\tsys.exit(app.exec_())\n\nrun()\n\nprint(\"COMPLETE\")\n\ndef init_ui(self):\n\tgrid = QtGui.QGridLayout()\n\tself.read_btn = QtGui.QPushButton('Read Moves.xlsx', self)\n\tself.read_btn.clicked.connect(self.read_excel)\n\tgrid.addWidget(self.read_btn, 0, 0)\n\tself.setWindowTitle(\"SMMTAS\")\n\tself.setGeometry(100,100,125,30)\n\tself.show()\n\n#def main():\n \n\"\"\"if __name__ == \"__main__\":\n main()\n \"\"\"\n#GPIO.cleanup()\n","sub_path":"python/20161200_MarioMakerBot/[2016-12] MarioMakerBot.py","file_name":"[2016-12] MarioMakerBot.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"123663057","text":"import tkinter\r\nroot=tkinter.Tk()\r\nroot.title(\"title\")\r\ndef browse():\r\n print(\"you are browsing\")\r\nlabel=tkinter.Label(root, text=\"label\")\r\nbrowseButton=tkinter.Button(root, text=\"button\", command=browse)\r\nbrowseButton.pack()\r\nlabel.pack()\r\nroot.mainloop()\r\n#pyinstaller --onefile --windowed \"file\" (in CMD to make py into exe)","sub_path":"pygui.py","file_name":"pygui.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"253078068","text":"# -*- coding: utf-8 -*-\n\nfrom PIL import Image\nimport os\nimport pytesseract\nimport enchant\nimport string\n\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\n\n# pre-process the image, remove the background\n\n\nclass OCR:\n def __init__(self):\n pass\n\n def pre_concert(self, img):\n width, height = img.size\n threshold = 30\n for i in range(0, width):\n for j in range(0, height):\n p = img.getpixel((i, j))\n r, g, b = p\n if r > threshold or g > threshold or b > threshold:\n img.putpixel((i, j), WHITE)\n else:\n img.putpixel((i, j), BLACK)\n img.save(\"images/pre_fig.jpg\")\n return\n\n def remove_noise(self, img, window=1):\n \"\"\"remove noise of pre configured image\"\"\"\n if window == 1:\n window_x = [1, 0, 0, -1, 0]\n window_y = [0, 1, 0, 0, -1]\n elif window == 2:\n window_x = [-1, 0, 1, -1, 0, 1, 1, -1, 0]\n window_y = [-1, -1, -1, 1, 1, 1, 0, 0, 0]\n 
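# window=1 samples a 5-point cross around each pixel, window=2 the full 3x3\n # block; the median of the sampled values decides black vs. white below\n 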
width, height = img.size\n for i in range(width):\n for j in range(height):\n box = []\n\n for k in range(len(window_x)):\n d_x = i + window_x[k]\n d_y = j + window_y[k]\n try:\n d_point = img.getpixel((d_x, d_y))\n if d_point == BLACK:\n box.append(1)\n else:\n box.append(0)\n except IndexError:\n img.putpixel((i, j), WHITE)\n continue\n\n box.sort()\n if len(box) == len(window_x):\n mid = box[int(len(box) / 2)]\n if mid == 1:\n img.putpixel((i, j), BLACK)\n else:\n img.putpixel((i, j), WHITE)\n img.save(\"images/mov_noise_fig.jpg\")\n return\n\n def image_to_string(self, opened_img):\n '''return: string (recognized captcha)'''\n try:\n result = pytesseract.image_to_string(\n opened_img).strip().strip(string.punctuation).lower()\n # All captchas I've seen on Douban.com are typical English words, hence we use\n # PyEnchant to check whether the recognized word is a real word. \n d = enchant.Dict(\"en_US\")\n if result and d.check(result):\n return result\n else:\n print(\">> Automatic OCR failed, try to recognize the image manually.\")\n Image.open('images/captcha.jpg').show() \n return input(\">> Type in here what you see in the image: \")\n\n # print('>> Recognized captcha is: ', result)\n # print(\">> Not sure about the captcha, return one from the following :\",\n # d.suggest(result))\n # return d.suggest(result)[0]\n except BaseException as ex:\n print(ex)\n return None\n\n def process_image(self, img_path):\n '''\n return: String (recognized captcha)\n '''\n img = Image.open(img_path)\n self.pre_concert(img)\n self.remove_noise(img, 2)\n return self.image_to_string(img)\n\n\n\nif __name__ == '__main__':\n OCR().process_image('images/captcha.jpg')\n","sub_path":"douban/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"576020477","text":"\"\"\"\nQuestion 17\nQuestion:\nWrite a program that computes the net amount of a bank account based on a transaction log\nfrom console input. 
The transaction log format is shown as following:\n\nD 100\nW 200\n\nD means deposit while W means withdrawal.\n\nSuppose the following input is supplied to the program:\n\nD 300\nD 300\nW 200\nD 100\n\nThen, the output should be:\n\n500\n\nHints:\nIn case of input data being supplied to the question, it should be assumed to be a console input.\"\"\"\n\n'''Solution by: popomaticbubble \n'''\ntransactions = []\n\nwhile True:\n text = input(\"> \")\n if text:\n text = text.strip('D ')\n text = text.replace('W ', '-')\n transactions.append(text)\n else:\n break\n\ntransactions = (int(i) for i in transactions)\nbalance = sum(transactions)\nprint(f\"Balance is {balance}\")\n","sub_path":"Python-Files/Day-5/Question-17-alternative-solution-3.py","file_name":"Question-17-alternative-solution-3.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"240870778","text":"# n = int(input())\n# mat = [[n]*n for i in range(n)]\n# mat[n-2][n-3]=n*n\n# print(mat)\n\n# n, m, k = (int(i) for i in input().split()) # read the field size and the number of mines\n# a = [[0 for j in range(m)] for i in range(n)] # fill the field with zeros\n# for i in range(k):\n# row, col = (int(i) - 1 for i in input().split())\n# a[row][col] = -1 # place the mines\n# print(a)\n\n#arr[col_len-1][row_len]+arr[col_len+1][row_len]+arr[col_len][row_len-1]+arr[col_len][row_len+1]\ncol_len=0\nrow_len=1\nmd=\"\"\narr=[]\na0,b0,an,bn=0,0,0,0\n# build the matrix\nwhile row_len>0: # loops until \"end\" is read\n md=input().split() # split the line into elements to add to the list\n if \"end\" in md:\n row_len-=1\n break # if the line contains \"end\", close the list\n else:\n if row_len==1:\n col_len=len(md)\n row_len+=1 # counter\n arr.append(md) # append the row elements to the 2-D list\n# build a zero matrix\narr_sum = [[0 for j in range(col_len)] for i in range(row_len)]\n# each element becomes the sum of its (wrap-around) neighbours\nfor i in range(len(arr)):\n for j in range(len(arr[i])):\n if i-1<0: a0=row_len-1\n else: a0=i-1\n if j-1<0: b0=col_len-1\n else: b0=j-1\n if i+1>row_len-1: an=0\n else: an=i+1\n if j+1>col_len-1: bn=0\n else: bn=j+1\n arr_sum[i][j]= int(arr[a0][j])+int(arr[an][j])+int(arr[i][b0])+int(arr[i][bn])\n# print the new matrix\nfor row in arr_sum:\n for elem in row:\n print(elem, end=' ')\n print()","sub_path":"Black Box(Smoke)/scratches/scratch_5.py","file_name":"scratch_5.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"95240582","text":"# exception when the input value is not a number\n# year = year = int(input('input year'))\n#\n# try:\n# year = year = int(input('input year'))\n# except ValueError:\n# print('Please enter a number')\n\n\n# exception when the key is not in the dictionary\n# dict = {1 : 'a', 2 : 'b', 3 : 'c'}\n# #dict[4]\n# try:\n# dict[4]\n# except KeyError:\n# print('try with another key')\n\n# exception when the list index is out of range\n# a_list = [1, 2, 3]\n# #a_list[4]\n# try:\n# a_list[4]\n# except IndexError:\n# print('List is as long as you think')\n\n# exception when the attribute or function does not exist\na_string = 123\n#a_string.append('f')\n# try:\n# a_string.append('f')\n# except AttributeError:\n# print('There is no function or fields as you defined, please be careful that python is dynamic type language')\n\n# exception when adding an element to a tuple\n# a_tuple = (1, 2, 3)\n# #a_tuple[3] = 4\n# try:\n# a_tuple[3] = 4\n
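# (tuples are immutable, so item assignment raises TypeError)\n# except 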
TypeError:\n# print('tuple is immutable')\n\n# multiple exception\na_tuple = (1, 2, 3)\ntry:\n a_tuple[3] = 4\nexcept (AttributeError, TypeError, KeyError, IndexError):\n print('something wrong happened')\n\n# use Exception to catch all types of exceptions as Java does\ntry:\n a_tuple[3] = 4\nexcept Exception:\n print('something wrong happened')\n\n# python collections support mixed element types; no exception is raised and the code works\nb_list = [1, 2, 3]\nb_list.append('a')\nprint(b_list)","sub_path":"Python Basic/jike_python_basic/exception_handling.py","file_name":"exception_handling.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"293462980","text":"#!/usr/bin/env python3\n\n# PyneCone is a debian/ubuntu iso creator\n\n# Script created by trash80 aka Scott Rogers\n# trash80@trash80.org\n\nimport os\n\n#PyneCone\n## methods that aren't used yet, just here for fun\ndef greeting():\n print(\"Welcome to Pyne-builder\\n\")\n request_base()\n#end-def\n\ndef choose_custom_base():\n \"\"\"Choose Debian or Ubuntu custom base\"\"\"\n print(\"Would you like to use [D]ebian or [U]buntu as the base?\\n\")\n base = input(\"Choose D/d or U/u: \")\n return base\n#end-def\n\ndef choose_base():\n \"\"\"Choose backup, iso, or custom base\"\"\"\n print(\"Would you like to use your current install/[B]ackup, an [I]SO file, or [C]ustom install?\\n\")\n install = input(\"Choose B/b, I/i, or C/c: \")\n return install\n#end-def\n\ndef choose_level():\n \"\"\"Choose automatic or manual install\"\"\"\n print(\"Would you like to use [A]utomatic or [M]anual?\\n\")\n level = input(\"Choose A/a or M/m\")\n return level\n#end-def\n\ndef choose_manual_level():\n \"\"\"choose Expert or Amature level for manual creation\"\"\"\n print(\"Would you like [E]xpert or [A]mature mode?\\n\")\n mode = input(\"Choose E/e or A/a: \")\n return mode\n#end-def\n\ndef choose_debian_type():\n \"\"\"If debian custom, choose the type,ie stable, testing, or unstable\"\"\"\n print(\"Would you like [S]table, [T]esting, or [U]nstable base?\\n\")\n type = input(\"Choose S/s, T/t, U/u: \")\n return type\n#end-def\n\n## Create media and build directories\n# mkdir -p ${CD}/{${FS_DIR},boot/grub} ${WORK}/rootfs\n# boot/grub and /rootfs are required options, they will be hardcoded\n\nfs_type = ''\nuname_type = ''\n\n# loop until a valid choice is made\nbase_choice = ''\nwhile base_choice not in ('U', 'D'):\n base_choice = choose_custom_base().strip().upper()\nif base_choice == 'U':\n fs_type = UBUNTU_FS_DIR\n uname_type = \"linux-ubuntu-modules-\"\nelse:\n fs_type = DEBIAN_FS_DIR\n uname_type = \"squashfs-modules-\"\n\niso_dir = \"/\"\ngrub_dir = iso_dir + \"/boot/grub\"\nbuild_dir = \"/rootfs\"\ndir_list = [iso_dir, grub_dir, build_dir]\nmake_dir = \"mkdir -p \"\n\nfor i in dir_list:\n pyne_builder.pycone_do(make_dir + i)\n\n## end-create directories\n\n## Install required utilities\n# apt-get update && apt-get install genisoimage, grub,\n# squashfs-tools, {linux-ubuntu-modules-, squashfs-modules-\n\ndef apt_get_do(apt_option, package_list): # update, install, clean\n \"\"\"apt-get system for 'update', 'install', 'clean'\n returns true if successful, false if error\n \"\"\"\n try:\n if apt_option == \"install\":\n os.system(\"apt-get \" + apt_option + \" \" + package_list)\n else:\n os.system(\"apt-get \" + apt_option)\n return True\n except OSError:\n return False\n#end-def\n\ndef get_base():\n\n # use inxi to figure out the base system\n base = get_uname()\n\n if base == \"unknown\":\n return False\n else:\n for name in base:\n 
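# scan the reported strings case-insensitively for a known distro name\n if \"ubuntu\" in 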
str(name).lower():\n name = \"Ubuntu\"\n break\n elif \"debian\" in str(name).lower():\n name = \"Debian\"\n break\n else:\n name = False\n return name\n#end-def\n","sub_path":"cone-scripts/musings/pynecone-stuff.py","file_name":"pynecone-stuff.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"347836260","text":"from airflow.exceptions import AirflowException\nimport pytest\n\nfrom plugins.novoalign.operators import NovoAlignOperator\n\n\n@pytest.mark.parametrize('fail, executable, fastq_left_mate, fastq_right_mate, reference, samtag, output, output_format, novoalign_params, expected', [ # NOQA\n (\n False, 'novoalign', 'reads1.fq', 'reads2.fq', 'ref', 'samtag', 'output', 'SAM', None,\n 'novoalign -d ref -f reads1.fq reads2.fq -o SAM samtag > output'\n ),\n (\n False, 'novoalign', '1.fq', '2.fq', 'path/ref', '\\'@RG\\tID:1\\tPU:PU\\tLB:bar\\tPL:PL\\tSM:SM\\'', 'path/output', 'SAM', '-i PE', # NOQA\n 'novoalign -d path/ref -f 1.fq 2.fq -i PE -o SAM \\'@RG\\tID:1\\tPU:PU\\tLB:bar\\tPL:PL\\tSM:SM\\' > path/output'\n ),\n (\n True, 'novoalign', 'reads1.fq', 'reads2.fq', 'ref', None, 'output', 'SAM', None,\n 'novoalign -d ref -f reads1.fq reads2.fq -o SAM > output'\n ),\n])\ndef test_prepare_command(fail, executable, fastq_left_mate, fastq_right_mate, reference, samtag, output,\n output_format, novoalign_params, expected):\n \"\"\"Test prepare_command method.\n\n 1. Ensure it builds NovoAlign command correctly\n 2. Ensure an exception is raised if SAM with no samtag is set\n \"\"\"\n task = NovoAlignOperator(task_id='test', executable=executable, reference=reference,\n fastq_left_mate=fastq_left_mate, fastq_right_mate=fastq_right_mate,\n samtag=samtag, output_format=output_format, output=output,\n novoalign_params=novoalign_params)\n if fail:\n with pytest.raises(AirflowException):\n task.prepare_command()\n else:\n assert task.prepare_command() == expected\n","sub_path":"plugins/novoalign/tests/test_novoalign_operator.py","file_name":"test_novoalign_operator.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"546528707","text":"from typing import Any, Callable, Tuple\nfrom dace.dtypes import paramdec\n\n\nclass Replacements(object):\n \"\"\" A management singleton for functions that replace existing function calls with either an SDFG or a node.\n Used in the Python frontend to replace functions such as `numpy.ndarray` and operators such\n as `Array.__add__`. \"\"\"\n\n _rep = {}\n _oprep = {}\n\n @staticmethod\n def get(name):\n \"\"\" Returns an implementation of a function. \"\"\"\n if name not in Replacements._rep:\n return None\n return Replacements._rep[name]\n\n @staticmethod\n def getop(classname: str, optype: str, otherclass: str = None):\n \"\"\" Returns an implementation of an operator. 
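Lookups are keyed by the\n (classname, otherclass, optype) tuple registered via replaces_operator below. 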
\"\"\"\n if otherclass is None:\n otherclass = classname\n if (classname, otherclass, optype) not in Replacements._oprep:\n return None\n return Replacements._oprep[(classname, otherclass, optype)]\n\n\n@paramdec\ndef replaces(func: Callable[..., Tuple[str]], name: str):\n \"\"\" Registers a replacement sub-SDFG generator for a function.\n :param func: A function that receives an SDFG, SDFGState, and the original function\n arguments, returning a tuple of array names to connect to the outputs.\n :param name: Full name (pydoc-compliant, including package) of function to replace.\n \"\"\"\n Replacements._rep[name] = func\n return func\n\n\n@paramdec\ndef replaces_operator(func: Callable[[Any, Any, str, str], Tuple[str]],\n classname: str,\n optype: str,\n otherclass: str = None):\n \"\"\" Registers a replacement sub-SDFG generator for an operator.\n :param func: A function that receives an SDFG, SDFGState, and the two operand array names,\n returning a tuple of array names to connect to the outputs.\n :param classname: The name of the class to implement the operator for (extends dace.Data).\n :param optype: The type (as string) of the operator to replace (extends ast.operator).\n :param otherclass: Optional argument defining operators for a second class that\n differs from the first.\n \"\"\"\n if otherclass is None:\n otherclass = classname\n Replacements._oprep[(classname, otherclass, optype)] = func\n return func\n","sub_path":"dace/frontend/common/op_repository.py","file_name":"op_repository.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"391383171","text":"class MagicDictionary(object):\n\n def __init__(self):\n self.dict = dict()\n \n\n def buildDict(self, dict):\n for word in dict:\n self.dict[len(word)] = self.dict.get(len(word),[])+[word]\n\n \n \n \n\n def search(self, word):\n if len(word) not in self.dict:\n return False\n for cand in self.dict.get(len(word),[]):\n countdiff = 0\n for i in range(len(word)):\n if word[i] != cand[i]:\n countdiff +=1\n if countdiff == 1:\n return True\n return False\n \n \n\n \n\n\n# Your MagicDictionary object will be instantiated and called as such:\n# obj = MagicDictionary()\n# obj.buildDict(dict)\n# param_2 = obj.search(word)\n","sub_path":"Trie/676. Implement Magic Dictionary.py","file_name":"676. 
Implement Magic Dictionary.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"22684662","text":"import os\nfrom functools import wraps\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom telegram.ext import Updater, MessageHandler, Filters, RegexHandler, CommandHandler\nimport sqlalchemy as sa\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, scoped_session, relationship\nfrom datetime import datetime\n\nSession = scoped_session(sessionmaker())\n_Base = declarative_base()\n\n# tables (models)\nclass Base(_Base):\n __abstract__ = True\n q = Session.query_property()\n\n\nclass Chat(Base):\n __tablename__ = 'chats'\n\n id = sa.Column(sa.Integer, primary_key=True)\n tasks = relationship('Task', back_populates='chat', lazy='dynamic')\n\n\nclass Task(Base):\n __tablename__ = 'tasks'\n\n id = sa.Column(sa.Integer, primary_key=True, autoincrement=True)\n chat_id = sa.Column(sa.Integer, sa.ForeignKey('chats.id'), nullable=False)\n chat = relationship('Chat', uselist=False, lazy='joined', back_populates='tasks')\n text = sa.Column(sa.Text, nullable=False)\n added = sa.Column(sa.DateTime, nullable=False, default=datetime.utcnow)\n completed = sa.Column(sa.DateTime, nullable=True)\n\n\n# decorators\ndef with_sesh(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n s = Session()\n try:\n result = fn(*args, s, **kwargs)\n except Exception:\n s.rollback()\n raise\n else:\n s.commit()\n finally:\n s.close()\n return result\n\n return wrapper\n\n\ndef with_chat(fn):\n # decorator for handlers only\n @wraps(fn)\n def wrapper(bot, update, *args, **kwargs):\n chat_id = update.message.chat_id\n chat = Chat.q.get(chat_id)\n if not chat:\n chat = Chat(id=chat_id)\n Session().add(chat)\n return fn(bot, update, chat, *args, **kwargs)\n return wrapper\n\n\n# the handlers themselves\n\n# /start\n@with_sesh\n@with_chat\ndef welcome(bot, update, chat, s):\n msg = \"\"\"Привет!\n- отправь любой текст чтобы сделать из него таск;\n- ответь \"+\"/\"-\" на таск, чтобы выполнить/отменить его;\n/todo - список невыполненных заданий;\n/done - список выполненных\"\"\"\n bot.send_message(chat_id=chat.id, text=msg)\n\n\n# \"-\" in reply to a task\n@with_sesh\n@with_chat\ndef remove_task(bot, update, chat, s):\n t = update.message.reply_to_message.text.strip()\n task = chat.tasks.filter_by(text=t).one()\n s.delete(task)\n bot.send_message(chat_id=chat.id, text='+')\n\n\n# \"+\" in reply to a task\n@with_sesh\n@with_chat\ndef complete_task(bot, update, chat, s):\n t = update.message.reply_to_message.text.strip()\n task = chat.tasks.filter_by(text=t).one()\n task.completed = datetime.utcnow()\n bot.send_message(chat_id=chat.id, text='+')\n\n\n# any other text message is treated as a new task\n@with_sesh\n@with_chat\ndef add_task(bot, update, chat, s):\n t = update.message.text.strip()\n exists = Task.q.filter(Task.text == t).count()\n if exists:\n bot.send_message(chat_id=chat.id, text='Этот таск уже есть ¯\\_(ツ)_/¯')\n return\n task = Task(text=t)\n chat.tasks.append(task)\n bot.send_message(chat_id=chat.id, text='+')\n\n\n# /todo: list of pending tasks\n@with_sesh\n@with_chat\ndef list_todo(bot, update, chat, s):\n tasks = chat.tasks.filter(Task.completed.is_(None)).order_by(Task.added.asc()).all()\n if not tasks:\n bot.send_message(chat_id=chat.id, text='Делать нечего ¯\\_(ツ)_/¯')\n else:\n for task in tasks:\n bot.send_message(chat_id=chat.id, text=task.text)\n\n\n# /done: list of completed tasks\n@with_sesh\n@with_chat\ndef list_done(bot, update, chat, s):\n tasks = chat.tasks.filter(Task.completed.isnot(None)).order_by(Task.completed.desc()).all()\n if not tasks:\n bot.send_message(chat_id=chat.id, text='Ничего не сделано ¯\\_(ツ)_/¯')\n else:\n for task in tasks:\n bot.send_message(chat_id=chat.id, text=task.text)\n\n\n@with_sesh\n@with_chat\ndef respond_on_error(bot, update, error, chat, s):\n bot.send_message(chat_id=chat.id, text='¯\\_(ツ)_/¯')\n\n\nif __name__ == '__main__':\n\n # logging\n import logging\n logging.basicConfig(level=logging.DEBUG, format=logging.BASIC_FORMAT)\n log = logging.getLogger(__name__)\n\n # database connection\n engine = sa.create_engine('sqlite:///tasks.sqlite')\n\n if not os.path.exists('tasks.sqlite'):\n log.info('creating database')\n Base.metadata.create_all(bind=engine)\n else:\n log.info('using existing database')\n\n Session.configure(bind=engine)\n\n # bot initialization\n u = Updater(token=os.environ['BOT_TOKEN'])\n d = u.dispatcher\n\n # attach the handlers\n d.add_handler(RegexHandler(r'^-$', remove_task))\n d.add_handler(RegexHandler(r'^\\+$', complete_task))\n d.add_handler(MessageHandler(Filters.text, add_task))\n d.add_handler(CommandHandler('todo', list_todo))\n d.add_handler(CommandHandler('done', list_done))\n d.add_handler(CommandHandler('start', welcome))\n d.add_error_handler(respond_on_error)\n\n # start polling\n u.start_polling(clean=True)\n\n # shut down cleanly when signals are caught\n u.idle()\n","sub_path":"taskbot.py","file_name":"taskbot.py","file_ext":"py","file_size_in_byte":5540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"43286102","text":"from typing import Dict, Any, List, Optional\n\nimport os\n\nfrom django.shortcuts import redirect\nfrom django.urls import path, include\n\n\ndef urls_oidc():\n return [\n path(\"oidc/\", include(\"mozilla_django_oidc.urls\"), name=\"oidc\"),\n path(\n \"accounts/login/\",\n lambda request: redirect(\"oidc_authentication_init\"),\n name=\"login\",\n ),\n ]\n\n\nAUTH_METHODS: Dict[str, Any] = {\n \"oidc\": {\n \"backends\": (\"mozilla_django_oidc.auth.OIDCAuthenticationBackend\",),\n \"installed_apps\": [\"mozilla_django_oidc\"],\n \"drf_auth_classes\": [\"mozilla_django_oidc.contrib.drf.OIDCAuthentication\"],\n \"urlfunc\": urls_oidc,\n },\n \"local\": {\n \"backends\": (),\n \"installed_apps\": [],\n \"drf_auth_classes\": [\n \"rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly\"\n ],\n \"urlfunc\": lambda: [path(\"accounts/\", include(\"django.contrib.auth.urls\"))],\n },\n}\n\nREST_FRAMEWORK: Dict[str, List[str]] = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": [\n \"mozilla_django_oidc.contrib.drf.OIDCAuthentication\"\n ]\n}\n\nauth_method: Optional[str] = None\n\n# Try to auto-detect the used auth method\nfor envvar in os.environ.keys():\n if envvar.startswith(\"OIDC_\"): # pragma: no cover\n auth_method = \"oidc\"\n if envvar == \"LOCAL_AUTH\":\n auth_method = \"local\"\n\nif auth_method is None: # pragma: no cover\n print(\"No authentication method configured\")\n raise Exception(\n \"Please configure authentication or set the LOCAL_AUTH environment \"\n \"variable to use local auth\"\n )\n\n# The settings used in other settings\nAUTH_INFO = AUTH_METHODS[auth_method]\n\n# OpenID Connect configuration\nOIDC_RP_CLIENT_ID = os.environ.get(\"OIDC_RP_CLIENT_ID\")\nOIDC_RP_CLIENT_SECRET = os.environ.get(\"OIDC_RP_CLIENT_SECRET\")\nOIDC_OP_AUTHORIZATION_ENDPOINT = 
os.environ.get(\"OIDC_RP_ENDPOINT_AUTHORIZATION\")\nOIDC_OP_TOKEN_ENDPOINT = os.environ.get(\"OIDC_RP_ENDPOINT_TOKEN\")\nOIDC_OP_USER_ENDPOINT = os.environ.get(\"OIDC_RP_ENDPOINT_USER\")\nOIDC_OP_JWKS_ENDPOINT = os.environ.get(\"OIDC_RP_ENDPOINT_JWKS\")\nOIDC_RP_SIGN_ALGO = os.environ.get(\"OIDC_RP_SIGN_ALGO\")\n","sub_path":"zezere/settings_auth.py","file_name":"settings_auth.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"265951078","text":"# OZ Holidays\n\nfrom pandas import DateOffset\nfrom pandas.tseries.holiday import Holiday, MO, weekend_to_monday\n\nfrom pandas_market_calendars.market_calendar import MONDAY, TUESDAY\n\n# New Year's Day\nOZNewYearsDay = Holiday(\n \"New Year's Day\",\n month=1,\n day=1,\n observance=weekend_to_monday,\n)\n\n# Australia Day\nAustraliaDay = Holiday(\n \"Australia Day\",\n month=1,\n day=27,\n observance=weekend_to_monday,\n)\n\n# ANZAC Day\nAnzacDay = Holiday(\n \"ANZAC Day\",\n month=4,\n day=25,\n)\n\n# Queen's Birthday\nQueensBirthday = Holiday(\n \"Queen's Birthday\",\n month=6,\n day=1,\n offset=DateOffset(weekday=MO(2)),\n)\n\n# Christmas\nChristmas = Holiday(\n \"Christmas\",\n month=12,\n day=25,\n observance=weekend_to_monday,\n)\n\n\n# Boxing day\nBoxingDay = Holiday(\n \"Boxing Day\",\n month=12,\n day=26,\n)\n\n# If boxing day is saturday then Monday 28th is a holiday\n# If boxing day is sunday then Tuesday 28th is a holiday\nWeekendBoxingDay = Holiday(\n \"Weekend Boxing Day\",\n month=12,\n day=28,\n days_of_week=(MONDAY, TUESDAY),\n)\n","sub_path":"pandas_market_calendars/holidays_oz.py","file_name":"holidays_oz.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"593396215","text":"class Solution:\n def nextPermutation(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place instead.\n \"\"\"\n i = len(nums) -2\n while i >= 0 and nums[i+1] <= nums[i]:\n i -= 1\n if i >= 0:\n j = len(nums) -1\n while j >= 0 and nums[j] <= nums[i]:\n j -= 1\n self.swap(nums, i, j)\n self.reverse(nums, i + 1)\n\n def swap(self, nums, i, j):\n temp = nums[i]\n nums[i] = nums[j]\n nums[j] = temp\n\n def reverse(self, nums, start_idx):\n i = start_idx\n j = len(nums) - 1\n while i < j:\n self.swap(nums, i, j)\n i += 1\n j -= 1\n\n\nif __name__ == \"__main__\":\n sol = Solution()\n # print(sol.nextPermutation([1,2,3,4]))\n nums = [1,2]\n nums2 = [1,3,2]\n print(sol.nextPermutation(nums))\n print(sol.nextPermutation(nums2))\n print(nums2)\n","sub_path":"lc_1-100/lc_31.py","file_name":"lc_31.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"457906676","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n'''\nMODULE: r.in.wcs.py\n\nAUTHOR(S): Martin Zbinden , inspired by \n module r.in.wms (GRASS7) by Stepan Turek \n \nPURPOSE: Downloads and imports data from WCS server (only version 1.0.0).\n According to http://grasswiki.osgeo.org/wiki/WCS\n \nVERSION: \t0.1\n\nDATE: \tMon Jun 16 21:00:00 CET 2014\n\nCOPYRIGHT: (C) 2014 Martin Zbinden and by the GRASS Development Team\n\nThis program is free software under the GNU General Public License\n(>=v2). 
Read the file COPYING that comes with GRASS for details.\n'''\n\n#%module\n#% description: Downloads and imports coverage from WCS server.\n#% keyword: raster\n#% keyword: import\n#% keyword: OGC web services\n#%end\n\n#%option\n#% key: url\n#% type: string\n#% description: Service URL (typically http://.../mapserv? )\n#% required: yes\n#% answer: http://...?\n#%end\n\n#%flag\n#% key: c\n#% description: Get the server capabilities and exit\n#% guisection: Request\n#%end\n\n#%option\n#% key: coverage\n#% type: string\n#% description: Coverage name to request\n#% multiple: no\n#% required: no\n#% guisection: Request\n#%end\n\n#%option\n#% key: urlparams\n#% type: string\n#% description: Additional query parameters to pass to the server\n#% guisection: Request\n#%end\n\n#%option\n#% key: username\n#% type:string\n#% description: Username for server connection\n#% guisection: Request\n#%end\n\n#%option\n#% key: password\n#% type:string\n#% description: Password for server connection\n#% guisection: Request\n#%end\n\n#%option\n#% key: output\n#% type: string\n#% gisprompt: new,cell,raster\n#% description: Name for output raster map (default: coveragename)\n#% required: no\n#%end\n\n#%option\n#% key: location\n#% type: string\n#% description: Name for new location to create\n#% required: no\n#%end\n\n#%option\n#% key: region\n#% type: string\n#% description: Name for region instead of current region\n#% required: no\n#%end\n\n\n\n\n\nimport os\nimport sys\nimport io\nimport grass.script as grass\nimport base64\nimport urllib2\nfrom httplib import HTTPException\nimport subprocess\n\ntry:\n    import lxml.etree as etree\n    LXML_AVAILABLE = True\nexcept ImportError:\n    # this fallback is not thoroughly tested but works when\n    # just starting the module and lxml is not available\n    import xml.etree.ElementTree as etree\n    LXML_AVAILABLE = False\n\n\nclass WCSBase:\n    def __init__(self):\n        # these variables are information for the destructor\n        self.temp_files_to_cleanup = []\n        self.params = {}\n        self.temp_map = None\n\n    def __del__(self):\n\n        # try to remove temporary files; all files should have been\n        # removed before, this is just a safeguard against an\n        # unexpected stop of the module\n        for temp_file in self.temp_files_to_cleanup:\n            grass.try_remove(temp_file)\n        pass\n\n    def _debug(self, fn, msg):\n        grass.debug(\"%s.%s: %s\" %\n                    (self.__class__.__name__, fn, msg))\n\n    def _initializeParameters(self, options, flags):\n        '''\n        Initialize all given and needed parameters. Get region information and \n        calculate the bounding box from it\n        \n        '''\n        self._debug(\"_initializeParameters\", \"started\")\n\n        for key in ['url', 'coverage','output','location']:\n            self.params[key] = options[key].strip()\n\n        if not self.params['output']:\n            self.params['output'] = self.params['coverage']\n        if not grass.overwrite(): \n            result = grass.find_file(name = self.params['output'], element = 'cell')\n            if result['file']:\n                grass.fatal(\"Raster map <%s> already exists. Choose another output name or set the --o (overwrite) flag.\" % self.params['output'])\n\n        for key in ['password', 'username', 'version','region']:\n            self.params[key] = options[key]\n\n        # check if authentication information is complete\n        if (self.params['password'] and self.params['username'] == '') or \\\n           (self.params['password'] == '' and self.params['username']):\n            grass.fatal(_(\"Please provide both %s and %s parameters or neither of them.\" % ('password', 'username')))\n\n\n        # configure region extent (specified name or current region)\n        self.params['region'] = self._getRegionParams(options['region'])\n        self.params['boundingbox'] = self._computeBbox(self.params['region'])\n        self._debug(\"_initializeParameters\", \"finished\")\n\n    def _getRegionParams(self,opt_region):\n        \"\"\"!Get region parameters from the specified region or the active default region\n\n        @return region_params as a dictionary\n        \"\"\"\n        self._debug(\"_getRegionParameters\", \"started\")\n\n        if opt_region:\n            reg_spl = opt_region.strip().split('@', 1)\n            reg_mapset = '.'\n            if len(reg_spl) > 1:\n                reg_mapset = reg_spl[1]\n\n            if not grass.find_file(name = reg_spl[0], element = 'windows', \n                                   mapset = reg_mapset)['name']:\n                grass.fatal(_(\"Region <%s> not found\") % opt_region)\n\n        if opt_region:\n            s = grass.read_command('g.region',\n                                   quiet = True,\n                                   flags = 'ug',\n                                   region = opt_region)\n            region_params = grass.parse_key_val(s, val_type = float)\n            grass.verbose(\"Using region parameters for region %s\" %opt_region)\n        else:\n            region_params = grass.region()\n            grass.verbose(\"Using current grass region\")\n\n        self._debug(\"_getRegionParameters\", \"finished\")\n        return region_params\n\n\n    def _computeBbox(self,region_params):\n        \"\"\"!Get extent for the WCS query (bbox) from region parameters\n\n        @return bounding box defined by list [minx,miny,maxx,maxy]\n        \"\"\"\n        self._debug(\"_computeBbox\", \"started\")\n        boundingboxvars = (\"w\",\"s\",\"e\",\"n\")\n        boundingbox = list()\n        for f in boundingboxvars:\n            boundingbox.append(self.params['region'][f])\n        grass.verbose(\"Boundingbox coordinates:\\n %s \\n [West, South, East, North]\" %boundingbox)\n        self._debug(\"_computeBbox\", \"finished\")\n        return boundingbox\n\n\n    def GetMap(self, options, flags):\n        \"\"\"!Download data from the WCS server.\n\n        @return mapname with downloaded data\n        \"\"\"\n        self._debug(\"GetMap\", \"started\")\n\n        self._initializeParameters(options, flags)\n        p = self._download()\n\n        if p != 0:\n            grass.fatal(\"Download or import of WCS data failed.\")\n            return\n\n        return self.params['output']\n\n    def _fetchCapabilities(self, options, flags):\n        \"\"\"!Download capabilities from the WCS server\n        \n        @return cap (instance of method _fetchDataFromServer)\n        \"\"\"\n        self._debug(\"_fetchCapabilities\", \"started\")\n        cap_url = options['url'].strip()\n\n        if \"?\" in cap_url:\n            cap_url += \"&\"\n        else:\n            cap_url += \"?\"\n\n        cap_url += \"SERVICE=WCS&REQUEST=GetCapabilities&VERSION=\" + options['version']\n\n        if options['urlparams']:\n            cap_url += \"&\" + options['urlparams']\n\n        grass.message('Fetching capabilities file\\n%s' % cap_url)\n\n        try:\n            cap = self._fetchDataFromServer(cap_url, options['username'], options['password'])\n        except (IOError, HTTPException) as e:\n            if urllib2.HTTPError == type(e) and e.code == 401:\n                grass.fatal(_(\"Authorization failed to <%s> when fetching capabilities\") % options['url'])\n            else:\n                msg = _(\"Unable to fetch capabilities from <%s>: %s\") % (options['url'], e)\n\n                if hasattr(e, 'reason'):\n                    msg += _(\"\\nReason: \") + str(e.reason)\n            \n            
grass.fatal(msg)\n self._debug(\"_fetchCapabilities\", \"finished\")\n return cap\n\n def _fetchDataFromServer(self, url, username = None, password = None):\n \"\"\"!Fetch data from server\n \n \"\"\"\n self._debug(\"_fetchDataFromServer\", \"started\")\n\n request = urllib2.Request(url)\n if username and password:\n base64string = base64.encodestring('%s:%s' % (username, password)).replace('\\n', '')\n request.add_header(\"Authorization\", \"Basic %s\" % base64string)\n\n try:\n return urllib2.urlopen(request)\n except ValueError as error:\n grass.fatal(\"%s\" % error)\n\n self._debug(\"_fetchDataFromServer\", \"finished\")\n\n\n def GetCapabilities(self, options,flags):\n \"\"\"!Get capabilities from WCS server and print to stdout\n \n \"\"\"\n self._debug(\"GetCapabilities\", \"started\")\n\n cap = self._fetchCapabilities(options,flags)\n root = etree.fromstringlist(cap.readlines())\n cov_offering = []\n for label in root.iter('{*}CoverageOfferingBrief'):\n cov_offering.append(label.find('{*}name').text + \" : \" + label.find('{*}label').text)\n grass.message(\"Available layers:\")\n grass.message('\\n'.join(cov_offering))\n self._debug(\"GetCapabilities\", \"finished\")\n\n\n\n def _tempfile(self):\n \"\"\"!Create temp_file and append list self.temp_files_to_cleanup\n with path of file\n\n @return string path to temp_file\n \"\"\"\n self._debug(\"_tempfile\", \"started\")\n temp_file = grass.tempfile()\n if temp_file is None:\n grass.fatal(_(\"Unable to create temporary files\"))\n\n # list of created tempfiles for destructor\n self.temp_files_to_cleanup.append(temp_file)\n self._debug(\"_tempfile\", \"finished\")\n\n return temp_file\n\nclass WCSGdalDrv(WCSBase):\n def _createXML(self):\n \"\"\"!Create XML for GDAL WCS driver\n\n @return path to XML file\n \"\"\"\n self._debug(\"_createXML\", \"started\")\n\n gdal_wcs = etree.Element(\"WCS_GDAL\")\n server_url = etree.SubElement(gdal_wcs, \"ServiceUrl\")\n server_url.text =self.params['url']\n\n version = etree.SubElement(gdal_wcs, \"Version\")\n version.text =self.params['version']\n\n coverage = etree.SubElement(gdal_wcs, \"CoverageName\")\n coverage.text = self.params['coverage']\n\n if self.params['username']:\n userpwd = etree.SubElement(gdal_wcs,'UserPwd')\n userpwd.text = self.params['username']+':'+ self.params['password']\n\n xml_file = self._tempfile()\n\n etree_gdal_wcs = etree.ElementTree(gdal_wcs)\n grass.debug(etree_gdal_wcs)\n etree.ElementTree(gdal_wcs).write(xml_file)\n\n self._debug(\"_createXML\", \"finished -> %s\" % xml_file)\n\n return xml_file\n\n def _createVRT(self):\n '''! create VRT with help of gdalbuildvrt program\n VRT is a virtual GDAL dataset format\n \n @return path to VRT file\n '''\n self._debug(\"_createVRT\", \"started\")\n vrt_file = self._tempfile() \n command = [\"gdalbuildvrt\", '-te'] \n command += self.params['boundingbox']\n command += [vrt_file, self.xml_file]\n command = [str(i) for i in command]\n\n grass.verbose(' '.join(command))\n\n self.process = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n self.out, self.err = self.process.communicate()\n grass.verbose(self.out)\n\n if self.err:\n grass.verbose(self.err+\"\\n\")\n if \"does not exist\" in self.err:\n grass.warning('Coverage \"%s\" cannot be opened / does not exist.' % self.params['coverage'])\n grass.fatal(\"Generation of VRT-File failed (gdalbuildvrt ERROR). 
Set the verbose flag for details.\")\n\n        self._debug(\"_createVRT\", \"finished\")\n        return vrt_file\n\n    def _download(self):\n        \"\"\"!Downloads data from the WCS server using the GDAL WCS driver\n\n        @return ret (exit code of r.in.gdal module)\n        \"\"\"\n        self._debug(\"_download\", \"started\")\n\n        self.xml_file = self._createXML()\n        self.vrt_file = self._createVRT()\n\n        grass.message('Starting module r.in.gdal ...')\n        \n        env = os.environ.copy()\n        env['GRASS_MESSAGE_FORMAT'] = 'gui'\n\n        if self.params['location'] == \"\":\n            p = grass.start_command('r.in.gdal',\n                             input=self.vrt_file,\n                             output=self.params['output'],\n                             stdout = grass.PIPE,\n                             stderr = grass.PIPE,\n                             env = env\n                            )\n            \n\n        else:\n            p = grass.start_command('r.in.gdal',\n                             input=self.vrt_file,\n                             output=self.params['output'],\n                             location = self.params['location'],\n                             stdout = grass.PIPE,\n                             stderr=grass.PIPE,\n                             env = env\n                            )\n        \n        while p.poll() is None:\n            line = p.stderr.readline()\n            linepercent = line.replace('GRASS_INFO_PERCENT:','').strip()\n            if linepercent.isdigit():\n                #print linepercent\n                grass.percent(int(linepercent),100,1)\n            else:\n                grass.verbose(line)\n        \n        grass.percent(100,100,5)\n        \n        ret = p.wait()\n        if ret != 0:\n            grass.fatal('r.in.gdal for %s failed.' % self.vrt_file )\n        else:\n            grass.message('r.in.gdal was successful for new raster map %s ' % self.params['output'] )\n\n        grass.try_remove(self.vrt_file)\n        grass.try_remove(self.xml_file)\n        self._debug(\"_download\", \"finished\")\n\n        return ret\n\n\ndef main():\n    url = options['url']\n    coverage = options['coverage']\n    output = options['output']\n    location = options['location']\n    region = options['region']\n    urlparams = options['urlparams']\n    username = options['username']\n    password = options['password']\n    flag_c = flags['c']\n\n    options['version']=\"1.0.0\" # currently the only supported version, therefore not in the GUI\n\n    if not LXML_AVAILABLE:\n        grass.warning(\"The Python lxml library is not installed.\"\n                      \" The functionality may be limited.\")\n\n    grass.debug(\"Using GDAL WCS driver\")\n    wcs = WCSGdalDrv()  # only supported driver\n\n    if flag_c:\n        wcs.GetCapabilities(options,flags)\n\n    else:\n        grass.message(\"Importing raster map into GRASS...\")\n        fetched_map = wcs.GetMap(options,flags)\n        if not fetched_map:\n            grass.warning(_(\"Nothing imported.\\n Data has not been downloaded from the WCS server.\"))\n            return 1\n\n    return 0\n\nif __name__ == \"__main__\":\n    options, flags = grass.parser()\n    sys.exit(main())\n","sub_path":"grass7/raster/r.in.wcs/r.in.wcs.py","file_name":"r.in.wcs.py","file_ext":"py","file_size_in_byte":14892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"431348140","text":"from collections import deque\n\nfrom hdl_toolkit.synthesizer.interfaceLevel.interface import Interface\nfrom hls_toolkit.streamLvl.valObj import valObj\nfrom hdl_toolkit.bitmask import mask\n\n\ndef write(val, intf):\n    assert isinstance(intf, Interface)\n    if not hasattr(intf, \"_streamLvlSimData\"):\n        intf._streamLvlSimData = deque()\n    intf._streamLvlSimData.appendleft(val)\n    \ndef read(intf):\n    \"\"\"\n    @return: valid, value\n    \"\"\"\n    if hasattr(intf, \"_streamLvlSimData\") and intf._streamLvlSimData:\n        return True, intf._streamLvlSimData.pop()\n    else:\n        return False, None\n    \ndef lookAt(intf):\n    \"\"\"\n    @return: valid, value\n    \"\"\"\n    if hasattr(intf, \"_streamLvlSimData\") and intf._streamLvlSimData:\n        return True, intf._streamLvlSimData[-1]\n    else:\n        return False, None\n    \ndef isEmpty(intf):\n    if hasattr(intf, \"_streamLvlSimData\"):\n        return not 
bool(intf._streamLvlSimData)\n else:\n return True\n \n\ndef packVal(val):\n try:\n intf = val._interface\n except AttributeError:\n return val\n \n _, v = _packVal(val, intf)\n return v\n\ndef _packVal(val, intf):\n if intf is not None and intf._interfaces:\n packedVal = 0\n width = 0\n for i in intf._interfaces:\n v = getattr(val, i._name)\n w, pv = _packVal(v, i)\n packedVal = (packedVal << w) | pv\n width += w\n \n return width, packedVal\n else:\n w = intf._dtype.bit_length()\n return w, val\n \ndef unpackVal(packedVal, intf, exclude=set()):\n _, v = _unpackVal(packedVal, intf, exclude)\n return v\n \ndef _unpackVal(packedVal, intf, exclude=set()):\n if intf._interfaces:\n unpackedVal = valObj(intf, exclude)\n width = 0\n for i in reversed(intf._interfaces):\n if i not in exclude:\n w, v = _unpackVal(packedVal, i, exclude)\n setattr(unpackedVal, i._name, v)\n packedVal >>= w\n width += w\n return width, unpackedVal\n else:\n w = intf._dtype.bit_length()\n return w, mask(w) & packedVal ","sub_path":"hls_toolkit/streamLvl/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"78489733","text":"# coding: utf-8\nfrom django.conf.urls import include, url, patterns\nfrom apps import views\n\nurlpatterns = patterns('',\n url(r'^account/', include('apps.account.urls')),\n url(r'^book/', include('apps.book.urls')),\n url(r'^class/', include('apps.class.urls')),\n url(r'^gift/', include('apps.gift.urls')),\n url(r'^system/', include('apps.system.urls')),\n url(r'^task/', include('apps.task.urls')),\n url(r'^im/', include('apps.im.urls')),\n url(r'^video/', include('apps.video.urls')),\n)\n\nurlpatterns += patterns('apps.views',\n (r'^$', 'index'),\n (r'^favicon.ico$', 'favicon'),\n (r'^robots.txt$', 'robots'),\n (r'^ads.txt$', 'ads'),\n)\n","sub_path":"tbkt/apps/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"616250996","text":"\n\n# run this program on each RPi to send a labelled image stream\nimport socket\nimport time\nfrom picamutil import PiJpegStream\nimport imagezmq.imagezmq as imagezmq\nimport cv2\nimport argparse\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-H\", \"--hub\", required=True,\n help=\"image hub ip address or host name\")\nargs = vars(ap.parse_args())\n\nhub_host = args[\"hub\"]\nhub_host_url = 'tcp://{}:5555'.format(hub_host)\nprint('hub host: {}'.format(hub_host_url))\nsender = imagezmq.ImageSender(connect_to=hub_host_url)\n\nrpi_name = socket.gethostname() # send RPi hostname with each image\npicam = PiJpegStream(resolution=(640, 480), framerate=16).start()\ntime.sleep(2.0) # allow camera sensor to warm up\n\navg_time_to_send = None\n\nwhile True: # send images as stream until Ctrl-C\n s = time.time()\n image = picam.read()\n print('read {} bytes image in {} s'.format(len(image), time.time()-s))\n s = time.time()\n sender.send_jpg(rpi_name, image)\n time_to_send = time.time()-s\n avg_time_to_send = time_to_send if avg_time_to_send is None else (avg_time_to_send + time_to_send) / 2\n print('sent image in {} s'.format(avg_time_to_send))\n time.sleep(0.05)\n\npicam.close()\n\n","sub_path":"send_jpeg.py","file_name":"send_jpeg.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"514027546","text":"import sys\nimport 
azure.functions as func\nfrom datetime import datetime, timedelta\nimport json\nimport base64\nimport hashlib\nimport hmac\nimport requests\nimport re\nimport os\nimport logging\nfrom azure.storage.fileshare import ShareClient\nfrom azure.storage.fileshare import ShareFileClient\nfrom azure.core.exceptions import ResourceNotFoundError\n\njwt_api_key = os.environ['LookoutClientId']\njwt_api_secret = os.environ['LookoutApiSecret']\ncustomer_id = os.environ['WorkspaceID']\nshared_key = os.environ['WorkspaceKey']\nconnection_string = os.environ['AzureWebJobsStorage']\nlogAnalyticsUri = os.environ.get('logAnalyticsUri')\nbaseurl = os.environ['Baseurl'] \nmaxResults = os.environ['MaxResults'] \nAuthurl = baseurl+\"/apigw/v1/authenticate\"\ntable_name = \"LookoutCloudSecurity\"\nSchedule = os.environ['Schedule']\nfetchDelay = os.getenv('FetchDelay',5)\npastDays = os.getenv('PastDays',7)\nchunksize = 500\nMaxEventCount = 2000\ntoken = \"\"\n\n\nlogging.info(\"Past days lookback taken as {}\".format(pastDays))\n# fall back to the default Log Analytics endpoint only when no URI was supplied\nif not logAnalyticsUri or str(logAnalyticsUri).isspace():\n    logAnalyticsUri = 'https://' + customer_id + '.ods.opinsights.azure.com'\n\npattern = r'https:\\/\\/([\\w\\-]+)\\.ods\\.opinsights\\.azure.([a-zA-Z\\.]+)$'\nmatch = re.match(pattern, str(logAnalyticsUri))\nif (not match):\n    raise Exception(\"Lookout: Invalid Log Analytics Uri.\")\n\n##############################\n######State Manager###### \n##############################\nclass StateManager:\n    def __init__(self, connection_string, share_name='funcstatemarkershare', file_path='Anomalyfuncmarkerfile'):\n        self.share_cli = ShareClient.from_connection_string(conn_str=connection_string, share_name=share_name)\n        self.file_cli = ShareFileClient.from_connection_string(conn_str=connection_string, share_name=share_name, file_path=file_path)\n\n    def post(self, marker_text: str):\n        try:\n            self.file_cli.upload_file(marker_text)\n        except ResourceNotFoundError:\n            self.share_cli.create_share()\n            self.file_cli.upload_file(marker_text)\n\n    def get(self):\n        try:\n            return self.file_cli.download_file().readall().decode()\n        except ResourceNotFoundError:\n            return None\n##############################\n######Lookout Connector###### \n##############################\n\nclass LookOut:\n\n    def __init__(self):\n        self.api_key = jwt_api_key\n        self.api_secret = jwt_api_secret\n        self.base_url = baseurl\n        self.jwt_token_exp_hours = 1\n        self.jwt_token = self.get_new_token() \n\n    def get_new_token(self):\n        url = Authurl\n        payload = json.dumps({\n            \"clientId\": self.api_key,\n            \"clientSecret\": self.api_secret,\n            \"grant_type\": \"refresh_token\"\n        })\n        headers = {\n            'Content-Type': 'application/json'\n        }\n        response = requests.request(\"POST\", url, headers=headers, data=payload)\n        tokens = json.loads(response.text) \n        return tokens['id_token'] \n\n    def generate_date(self):\n        current_time_day = datetime.utcnow().replace(second=0, microsecond=0) \n        logging.info(\"Current time: {}\".format(current_time_day))\n        current_time_day = (current_time_day - timedelta(minutes=int(fetchDelay))).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\") \n        logging.info(\"Fetch delay taken as {} minutes\".format(fetchDelay))\n        logging.info(\"Time after applying the fetch delay: {}\".format(current_time_day))\n        state = StateManager(connection_string)\n        past_time = state.get()\n        if past_time is not None:\n            logging.info(\"The last run happened at: {}\".format(past_time))\n        else:\n            logging.info(\"There is no last run timestamp, trying to get events for the past days window.\")\n            logging.info(\"Past days taken as {} days\".format(pastDays))\n            past_time = 
(datetime.utcnow().replace(second=0, microsecond=0) - timedelta(days=int(pastDays))).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n        return (past_time, current_time_day)\n\n    def get_Data(self,report_type_suffix,startTime,endTime):\n        \n        try:\n            headers = {\n            'Authorization':'Bearer'+' '+ self.jwt_token\n            }\n            payload = {}\n            logging.info(\"The url being called: {}\".format(baseurl + report_type_suffix+\"&startTime=\"+startTime+\"&endTime=\"+endTime+\"&maxResults=\"+maxResults))\n            response = requests.request(\"GET\", baseurl + report_type_suffix+\"&startTime=\"+startTime+\"&endTime=\"+endTime+\"&maxResults=\"+maxResults, headers=headers, data=payload)\n            if response.status_code == 200:\n                jsondata = json.loads(response.text)\n                try:\n                    return jsondata['data']\n                except KeyError:\n                    return []\n            elif response.status_code == 400:\n                logging.error(\"Bad request. The query parameters were rejected by the server.\"\n                              \" Error code: {}\".format(response.status_code))\n            elif response.status_code == 401:\n                logging.error(\"Unauthorized. Invalid access token. Error code: {}\".format(response.status_code)) \n            else:\n                logging.error(\"Something went wrong. Error code: {}\".format(response.status_code))\n        except Exception as err:\n            logging.error(\"Something went wrong. Exception error text: {}\".format(err))\n\n##############################\n######Sentinel Connector###### \n##############################\n\nclass Sentinel:\n\n    def __init__(self):\n        self.logAnalyticsUri = logAnalyticsUri\n        self.success_processed = 0\n        self.fail_processed = 0\n        self.table_name = table_name\n        self.chunksize = chunksize \n        self.sharedkey = shared_key\n\n    def gen_chunks(self, data):\n        chunks = [data[i:i+chunksize] for i in range(0, len(data), chunksize)]\n        logging.info(\"Entered chunked processing mode\") \n        i = 0 \n        for chunk in chunks: \n            i = i+1\n            logging.debug(\"Iteration chunk {}\".format(i)) \n            body = json.dumps(chunk)\n            logging.debug(body)\n            self.post_data(body, len(chunk))\n            state = StateManager(connection_string) \n            latestTimeStamp = chunk[-1][\"timeStamp\"]\n            zulu_time_format = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n            latestTimeStampnew = datetime.strptime(latestTimeStamp,zulu_time_format) + timedelta(milliseconds=1)\n            logging.info(\"Chunk timestamp {}\".format(latestTimeStampnew)) \n            state.post(latestTimeStampnew.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"))\n\n\n    def build_signature(self, date, content_length, method, content_type, resource):\n        x_headers = 'x-ms-date:' + date\n        string_to_hash = method + \"\\n\" + str(content_length) + \"\\n\" + content_type + \"\\n\" + x_headers + \"\\n\" + resource\n        bytes_to_hash = bytes(string_to_hash, encoding=\"utf-8\")\n        decoded_key = base64.b64decode(self.sharedkey)\n        encoded_hash = base64.b64encode(hmac.new(decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()\n        authorization = \"SharedKey {}:{}\".format(customer_id, encoded_hash)\n        return authorization\n\n    def post_data(self, body, chunk_count):\n        method = 'POST'\n        content_type = 'application/json'\n        resource = '/api/logs'\n        rfc1123date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')\n        content_length = len(body)\n        signature = self.build_signature(rfc1123date, content_length, method, content_type,\n                                         resource)\n        uri = self.logAnalyticsUri + resource + '?api-version=2016-04-01'\n        headers = {\n            'content-type': content_type,\n            'Authorization': signature,\n            'Log-Type': self.table_name,\n            'x-ms-date': rfc1123date\n        }\n        response = requests.post(uri, data=body, headers=headers)\n        
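# The Log Analytics HTTP Data Collector API signals success with a 2xx response;\n        # any other status means the posted chunk was rejected and is counted as failed below.\n        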
if (response.status_code >= 200 and response.status_code <= 299):\n            logging.info(\"Chunk was processed: {} events\".format(chunk_count))\n            self.success_processed = self.success_processed + chunk_count\n        else:\n            logging.error(\"Error during sending events to Microsoft Sentinel. Response code: {}\".format(response.status_code))\n            self.fail_processed = self.fail_processed + chunk_count \n\n# this function app is fired based on the Timer trigger\n# it is used to capture all the events from the Lookout Cloud Security API \ndef main(mytimer: func.TimerRequest) -> None:\n    utc_timestamp = datetime.utcnow().isoformat()\n    if mytimer.past_due:\n        logging.info('The timer is past due!')\n    logging.info('Python timer trigger function ran at %s', utc_timestamp)\n    logging.info('Starting program')\n    results_events = []\n    try:\n        Lookout = LookOut()\n        sentinel = Sentinel()\n        sentinel.sharedkey = shared_key\n        sentinel.table_name= table_name \n        startTime,endTime = Lookout.generate_date() \n        logging.info(\"The current run start time {}\".format(startTime))\n        logging.info(\"The current run end time {}\".format(endTime)) \n        logging.info('Start: fetching anomalies')\n        results_events = Lookout.get_Data(\"/apigw/v1/events?eventType=Anomaly\",startTime,endTime)\n        logging.info(\"The number of anomalies processed: {}\".format(len(results_events)))\n        logging.info('End: fetching anomalies') \n        \n        if len(results_events) > 0:\n            # Sort the json based on the \"timeStamp\" key\n            sorted_data = sorted(results_events, key=lambda x: x[\"timeStamp\"],reverse=False) \n            # Fetch the latest timestamp\n            latest_timestamp = sorted_data[-1][\"timeStamp\"] \n            logging.info(\"The latest timestamp {}\".format(latest_timestamp)) \n            body = json.dumps(results_events)\n            if(len(results_events) <= MaxEventCount):\n                logging.debug(body) \n                sentinel.post_data(body,len(results_events))\n                state = StateManager(connection_string) \n                zulu_time_format = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n                latestTimeStampnew = datetime.strptime(latest_timestamp,zulu_time_format) + timedelta(milliseconds=1)\n                logging.info(\"The final latest timestamp {}\".format(latestTimeStampnew)) \n                state.post(latestTimeStampnew.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"))\n            elif(len(results_events) > MaxEventCount): \n                sentinel.gen_chunks(sorted_data)\n\n        sentinel_class_vars = vars(sentinel)\n        success_processed, fail_processed = sentinel_class_vars[\"success_processed\"],\\\n            sentinel_class_vars[\"fail_processed\"]\n        logging.info('Total events processed successfully: {}, failed: {}. Period: {} - {}'\n                     .format(success_processed, fail_processed, startTime, endTime))\n    except Exception as err:\n        logging.error(\"Something went wrong. 
Exception error text: {}\".format(err))\n logging.error( \"Error: LookOut Cloud Security events data connector execution failed with an internal server error.\")\n raise\n \n \n \n","sub_path":"Solutions/Lookout Cloud Security Platform for Microsoft Sentinel/Data Connectors/LookoutCSConnector/LookoutCloudSecurityAnamolies/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"267194747","text":"# Copyright 2020 The TensorFlow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific\n\"\"\"Class to test basis projection kernel\"\"\"\n\nimport os\nimport sys\nimport numpy as np\nimport tensorflow as tf\nfrom absl.testing import parameterized\nfrom tensorflow_graphics.util import test_case\n\nfrom pylib.pc import PointCloud, Grid, Neighborhood, AABB\nfrom pylib.pc.tests import utils\nfrom pylib.pc.layers import MCConv\nfrom pylib.pc.custom_ops import basis_proj\n\n\nclass BasisProjTest(test_case.TestCase):\n\n @parameterized.parameters(\n (2000, 200, [3, 3], 16, 0.7, 8, 2),\n (4000, 400, [3, 3], 8, np.sqrt(2), 8, 2),\n (2000, 200, [1, 3], 16, 0.7, 8, 3),\n (4000, 400, [3, 3], 8, 0.7, 8, 3),\n (4000, 100, [3, 1], 1, np.sqrt(3), 16, 3),\n (2000, 200, [3, 3], 16, 0.7, 8, 4),\n (4000, 400, [1, 3], 8, np.sqrt(4), 32, 4)\n )\n def test_basis_proj(self,\n num_points,\n num_samples,\n num_features,\n batch_size,\n radius,\n hidden_size,\n dimension):\n cell_sizes = np.float32(np.repeat(radius, dimension))\n points, batch_ids = utils._create_random_point_cloud_segmented(\n batch_size, num_points, dimension=dimension)\n features = np.random.rand(num_points, num_features[0])\n point_cloud = PointCloud(points, batch_ids)\n\n point_samples, batch_ids_samples = \\\n utils._create_random_point_cloud_segmented(\n batch_size, num_samples, dimension=dimension)\n\n point_cloud_samples = PointCloud(point_samples, batch_ids_samples)\n grid = Grid(point_cloud, cell_sizes)\n neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)\n nb_ids = neighborhood._original_neigh_ids\n # tf\n conv_layer = MCConv(\n num_features[0], num_features[1], dimension, 1, [hidden_size])\n\n basis_weights_tf = tf.reshape(conv_layer._weights_tf[0],\n [dimension, hidden_size])\n basis_biases_tf = tf.reshape(conv_layer._bias_tf[0], [1, hidden_size])\n\n neigh_point_coords = points[nb_ids[:, 0]]\n center_point_coords = point_samples[nb_ids[:, 1]]\n kernel_input = (neigh_point_coords - center_point_coords) / radius\n basis_neighs = \\\n tf.matmul(kernel_input.astype(np.float32), basis_weights_tf) + \\\n basis_biases_tf\n basis_neighs = tf.nn.relu(basis_neighs)\n\n weighted_latent_per_sample_tf = basis_proj(basis_neighs,\n features,\n neighborhood)\n\n # numpy\n neighbor_ids = neighborhood._original_neigh_ids.numpy()\n nb_ranges = neighborhood._samples_neigh_ranges.numpy()\n # extract variables\n hidden_weights = basis_weights_tf.numpy()\n hidden_biases = basis_biases_tf.numpy()\n\n features_on_neighbors = features[neighbor_ids[:, 0]]\n # compute first layer of kernel 
MLP\n    point_diff = (points[neighbor_ids[:, 0]] -\\\n        point_samples[neighbor_ids[:, 1]])\\\n        / np.expand_dims(cell_sizes, 0)\n\n    latent_per_nb = np.dot(point_diff, hidden_weights) + hidden_biases\n\n    latent_relu_per_nb = np.maximum(latent_per_nb, 0)\n    # Monte-Carlo integration after first layer\n    # weighting with pdf\n    weighted_features_per_nb = np.expand_dims(features_on_neighbors, 2) * \\\n        np.expand_dims(latent_relu_per_nb, 1)\n    nb_ranges = np.concatenate(([0], nb_ranges), axis=0)\n    # sum (integration)\n    weighted_latent_per_sample = \\\n        np.zeros([num_samples, num_features[0], hidden_size])\n    for i in range(num_samples):\n      weighted_latent_per_sample[i] = \\\n          np.sum(weighted_features_per_nb[nb_ranges[i]:nb_ranges[i + 1]],\n                 axis=0)\n\n    self.assertAllClose(weighted_latent_per_sample_tf,\n                        weighted_latent_per_sample)\n\n  @parameterized.parameters(\n    (8, 4, [8, 8], 2, np.sqrt(3) * 1.25, 8, 3)\n  )\n  def test_basis_proj_jacobian(self,\n                               num_points,\n                               num_samples,\n                               num_features,\n                               batch_size,\n                               radius,\n                               hidden_size,\n                               dimension):\n    cell_sizes = np.float32(np.repeat(radius, dimension))\n    points, batch_ids = utils._create_random_point_cloud_segmented(\n        batch_size, num_points, dimension=dimension)\n    features = np.random.rand(num_points, num_features[0])\n    point_cloud = PointCloud(points, batch_ids)\n\n    point_samples, batch_ids_samples = \\\n        utils._create_random_point_cloud_segmented(\n            batch_size, num_samples, dimension=dimension)\n\n    point_cloud_samples = PointCloud(point_samples, batch_ids_samples)\n    grid = Grid(point_cloud, cell_sizes)\n    neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)\n    nb_ids = neighborhood._original_neigh_ids\n    # tf\n    conv_layer = MCConv(\n        num_features[0], num_features[1], dimension, 1, [hidden_size])\n\n    neigh_point_coords = points[nb_ids[:, 0].numpy()]\n    center_point_coords = point_samples[nb_ids[:, 1].numpy()]\n    kernel_input = (neigh_point_coords - center_point_coords) / radius\n\n    basis_weights_tf = tf.reshape(conv_layer._weights_tf[0],\n                                  [dimension, hidden_size])\n    basis_biases_tf = tf.reshape(conv_layer._bias_tf[0], [1, hidden_size])\n\n    basis_neighs = \\\n        tf.matmul(kernel_input.astype(np.float32), basis_weights_tf) +\\\n        basis_biases_tf\n    basis_neighs = tf.nn.leaky_relu(basis_neighs)\n\n    _, _, counts = tf.unique_with_counts(neighborhood._neighbors[:, 1])\n    max_num_nb = tf.reduce_max(counts).numpy()\n\n    with self.subTest(name='features'):\n      def basis_proj_features(features_in):\n        return basis_proj(basis_neighs,\n                          features_in,\n                          neighborhood) / (max_num_nb)\n\n      self.assert_jacobian_is_correct_fn(\n          basis_proj_features, [np.float32(features)], atol=1e-4, delta=1e-3)\n\n    with self.subTest(name='neigh_basis'):\n      def basis_proj_basis_neighs(basis_neighs_in):\n        return basis_proj(basis_neighs_in,\n                          features,\n                          neighborhood) / (max_num_nb)\n\n      self.assert_jacobian_is_correct_fn(\n          basis_proj_basis_neighs,\n          [np.float32(basis_neighs)],\n          atol=1e-4, delta=1e-3)\n\n\nif __name__ == '__main__':\n  test_case.main()\n","sub_path":"pylib/pc/layers/tests/basis_proj_test.py","file_name":"basis_proj_test.py","file_ext":"py","file_size_in_byte":6907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"370622057","text":"# coding: utf-8\n'''\nCreated on 2014-10-08\n\n@author: Administrator\n''' \nfrom libs import web\nfrom core.modules.module_handle import api_handle\nfrom core.auth import auth_base 
\nfrom utils.func_api import FuncResult \nfrom core.log.logging_trace import log_trace \nfrom utils.tools import 
json_to_obj \nfrom libs.orm.ormutils import get_data_from_sql2\nfrom utils.crypt import des_encrypt,des_decrypt\nimport re\nfrom apps.crm.common.get_roomratetype import handler as rmrate_handler\nfrom apps.crm.seller.get_seller_list import handler as seller_handler\nfrom apps.crm.agreement.get_agreement_info import handler as agrinfo_handler\n\nclass handler(object):\n    '''Query agreements\n    ''' \n    @auth_base()\n    @api_handle(db=True) \n    def GET(self): \n        log_trace() \n        input = web.input()\n        smart_text = input.get('smart_text')\n        \n        result = self.get_data(smart_text)\n\n        return result\n    \n    \n    def get_data(self, smart_text):\n        \n        grp_code = web.ctx.session.login_hotel_info['Group']['HotelGroupCode']\n        hotelcode = web.ctx.session.login_hotel_info['AuthInfo']['HotelCode']\n        \n        params = ['a.HotelGroupCode =:HotelGroupCode and a.State =:State ']\n        params_value = {'HotelGroupCode': grp_code} \n        params_value['State'] = 1 # approved\n        \n        if smart_text: \n            if len(smart_text) == 11 and smart_text[0]=='1':\n                params.append('a.ContractMobile = :Mobile')\n                params_value['Mobile'] = des_encrypt(smart_text)\n            elif re.findall('[\\x80-\\xff].', str(smart_text)):# Chinese characters, i.e. search by company name\n                params.append('a.AgreementName like :agr_name')\n                params_value['agr_name'] = '%'+ smart_text +'%'\n            elif len(smart_text):\n                params.append('a.ContractNo = :ContractNo')\n                params_value['ContractNo'] = smart_text\n        else:\n            return FuncResult(success=True, value=[])\n        \n        \n        sql = ''' \n            Select\n                a.AgreementID, a.AgreementName, a.CreateHotelCode, a.HotelGroupCode, a.ContractPersion, \n                a.ContractMobile, a.ContractPosition, a.BusinessCode, a.AgreementType, a.LegalPerson, \n                a.ContractNo, a.CreateTime, a.CreateUserCode, a.CreateUserName, a.RoomRateTypeCode,\n                a.SignSellerID, a.FollowSellerID\n            From \n                g_Agreement a\n            '''\n\n        ret = get_data_from_sql2(sql, params, params_value, '', '', None).value\n        \n        if ret['data'] and len(ret['data'])>0:\n            sellers = seller_handler().get_all_seller()\n            rmratetype = rmrate_handler().get_all_roomratetype()\n            for d in ret['data']:\n#                rmrate = [r for r in rmratetype if r['RoomRateTypeCode'] == d['RoomRateTypeCode'] and r['HotelCode'] == d['CreateHotelCode']]\n#                if len(rmrate)>0:\n#                    d['RoomRateName'] = rmrate[0]['RoomRateName']\n                if sellers.has_key(str(d['SignSellerID'])):\n                    d['SignSellerName'] = sellers[str(d['SignSellerID'])]['SellerName']\n                if sellers.has_key(str(d['FollowSellerID'])):\n                    d['FollowSellerName'] = sellers[str(d['FollowSellerID'])]['SellerName']\n                \n                d['ContractMobile'] = des_decrypt(d['ContractMobile'])\n                \n                agrinfo = agrinfo_handler().get_agreement_info(d['AgreementID'], hotelcode)\n                if agrinfo.success:\n                    if len(agrinfo.value[0]['Info']['RoomRateType'])>0:\n                        lst_info = [r for r in agrinfo.value[0]['Info']['RoomRateType'] if r.has_key('Flag') and r['Flag'] == 1]\n                        if len(lst_info)>0:\n                            d['RoomRateTypeCode'] = lst_info[0]['RoomRateTypeCode']\n                            d['RoomRateName'] = lst_info[0]['RoomRateName']\n                    \n                    if agrinfo.value[0].has_key('RoomRateFlag'):\n                        if len(agrinfo.value[0]['Info']['RoomType'])>0:\n                            d['lstRoomTypeRate'] = agrinfo.value[0]['Info']['RoomType']\n        \n        return FuncResult(success=True, value=ret)\n","sub_path":"crm/agreement/get_agreement_by_smart.py","file_name":"get_agreement_by_smart.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"412656811","text":"\"\"\"\nGiven an array of integers that are out of order, determine the bounds of the smallest\nwindow that must be sorted in order for the entire array to be sorted. 
For example,\ngiven [3, 7, 5, 6, 9], you should return (1, 3)\n\"\"\"\n\ndef locate_sorting_window(values):\n    window_index = [0,len(values) - 1]\n    max_value = -float('inf')\n    min_value = float('inf')\n\n    # Finding right-most window index\n    for i, value in enumerate(values):\n        max_value = max(value, max_value)\n        # if statement checks if \"value\" is in a bad position\n        # and needs to be considered inside the window, this\n        # is because \"value\", while iterating from left to right\n        # in the array, should not be \"less than\" the \"max_value\"\n        # and be at its right\n        if value < max_value:\n            window_index[1] = i\n\n    # Finding left-most window index\n    for i, value in enumerate(values[::-1]):\n        min_value = min(value, min_value)\n        # Similarly, the \"value\" in iteration, while\n        # checking from right to left, should not be\n        # at \"min_value\"'s left; if so, \"value\" needs\n        # to be considered inside the window\n        if value > min_value:\n            window_index[0] = len(values) - i - 1\n\n    return window_index\n\n\n# Testing\nprint(\"-\"*14, \"\\n\", \".: Testing :.\", \"\\n\", \"-\"*14, sep='')\ninputs = [\n    [3, 7, 5, 6, 9],\n    [3, 7, 5, 6, 4],\n    [3, 7, 5, 6, 9, 1],\n]\n\nfor input_arg in inputs:\n    print(f\"in: {input_arg}\\n|\\n┖-> out: {locate_sorting_window(input_arg)}\\n\")\n","sub_path":"DailyCodingProblem/01_Lists/2_smallest_sorting_window.py","file_name":"2_smallest_sorting_window.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"221541995","text":"names = []\r\nprint (\"Enter strings (end with DONE):\")\r\n\r\nwhile True:\r\n    \r\n    \r\n    name = input(\"\")\r\n    \r\n    \r\n    if name == 'DONE':\r\n        break\r\n    \r\n    names.append(name)\r\n    \r\nprint (\"\") \r\nprint (\"Right-aligned list:\") \r\nfor i in names:\r\n    col_width = len(max(names, key=len))\r\n    print (i.rjust(col_width))","sub_path":"examples/data/Assignment_6/rffada002/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"533792284","text":"import os\nimport re\nimport unittest\nfrom array import array\nfrom collections import defaultdict, deque, Set\n\nimport numpy\n\nfrom tests.utils import TestCaseWithUtils, temp_attrs\n\ntry:\n    from collections import Counter\nexcept ImportError:\n    from counter import Counter\n\ntry:\n    from collections import OrderedDict\nexcept ImportError:\n    from ordereddict import OrderedDict\n\ntry:\n    from collections import ChainMap\nexcept ImportError:\n    from chainmap import ChainMap\n\nfrom cheap_repr import basic_repr, register_repr, cheap_repr, PY2, PY3, ReprSuppressedWarning, find_repr_function, \\\n    raise_exceptions_from_default_repr\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'tests.fake_django_settings'\nimport django\n\ndjango.setup()\nfrom django.contrib.contenttypes.models import ContentType\n\n\nclass FakeExpensiveReprClass(object):\n    def __repr__(self):\n        return 'bad'\n\n\nregister_repr(FakeExpensiveReprClass)(basic_repr)\n\n\nclass ErrorClass(object):\n    def __init__(self, error=False):\n        self.error = error\n\n    def __repr__(self):\n        if self.error:\n            raise ValueError()\n        return 'bob'\n\n\nclass ErrorClassChild(ErrorClass):\n    pass\n\n\nclass OldStyleErrorClass:\n    def __init__(self, error=False):\n        self.error = error\n\n    def __repr__(self):\n        if self.error:\n            raise ValueError()\n        return 'bob'\n\n\nclass OldStyleErrorClassChild(OldStyleErrorClass):\n    
pass\n\n\nclass DirectRepr(object):\n def __init__(self, r):\n self.r = r\n\n def __repr__(self):\n return self.r\n\n\nclass RangeSet(Set):\n def __init__(self, length):\n self.length = length\n\n def __contains__(self, x):\n pass\n\n def __iter__(self):\n for x in range(self.length):\n yield x\n\n def __len__(self):\n return self.length\n\n\nclass TestCheapRepr(TestCaseWithUtils):\n maxDiff = None\n\n def assert_cheap_repr(self, x, expected_repr):\n self.assertEqual(\n re.sub(r'0x[0-9a-f]+', '0xXXX', cheap_repr(x)),\n expected_repr)\n\n def assert_usual_repr(self, x):\n self.assert_cheap_repr(x, repr(x))\n\n def assert_cheap_repr_evals(self, s):\n self.assert_cheap_repr(eval(s), s)\n\n def assert_cheap_repr_warns(self, x, message, expected_repr):\n self.assert_warns(ReprSuppressedWarning,\n message,\n lambda: self.assert_cheap_repr(x, expected_repr))\n\n def test_registered_default_repr(self):\n x = FakeExpensiveReprClass()\n self.assertEqual(repr(x), 'bad')\n self.assert_cheap_repr(x, r'')\n\n def test_chain_map(self):\n self.assert_usual_repr(ChainMap({1: 2, 3: 4}, dict.fromkeys('abcd')))\n\n ex = (\"ChainMap([\"\n \"OrderedDict([('1', 0), ('2', 0), ('3', 0), ('4', 0), ...]), \"\n \"OrderedDict([('1', 0), ('2', 0), ('3', 0), ('4', 0), ...]), \"\n \"OrderedDict([('1', 0), ('2', 0), ('3', 0), ('4', 0), ...]), \"\n \"OrderedDict([('1', 0), ('2', 0), ('3', 0), ('4', 0), ...]), \"\n \"OrderedDict([('1', 0), ('2', 0), ('3', 0), ('4', 0), ...]), \"\n \"OrderedDict([('1', 0), ('2', 0), ('3', 0), ('4', 0), ...]), \"\n \"...])\")\n self.assert_cheap_repr(ChainMap([OrderedDict.fromkeys('1234567890', 0) for _ in range(10)]),\n ex)\n\n def test_list(self):\n self.assert_usual_repr([])\n self.assert_usual_repr([1, 2, 3])\n self.assert_cheap_repr([1, 2, 3] * 10, '[1, 2, 3, 1, 2, 3, ...]')\n\n def test_tuple(self):\n self.assert_usual_repr(())\n self.assert_usual_repr((1,))\n self.assert_usual_repr((1, 2, 3))\n self.assert_cheap_repr((1, 2, 3) * 10, '(1, 2, 3, 1, 2, 3, ...)')\n\n def test_sets(self):\n self.assert_usual_repr(set())\n self.assert_usual_repr(frozenset())\n self.assert_usual_repr({1, 2, 3})\n self.assert_usual_repr(frozenset({1, 2, 3}))\n self.assert_cheap_repr(set(range(10)),\n 'set([0, 1, 2, 3, 4, 5, ...])' if PY2 else\n '{0, 1, 2, 3, 4, 5, ...}')\n\n def test_dict(self):\n self.assert_usual_repr({})\n d1 = {1: 2, 2: 3, 3: 4}\n self.assert_usual_repr(d1)\n d2 = dict((x, x * 2) for x in range(10))\n self.assert_cheap_repr(d2, '{0: 0, 1: 2, 2: 4, 3: 6, ...}')\n\n if PY3:\n self.assert_usual_repr({}.keys())\n self.assert_usual_repr({}.values())\n self.assert_usual_repr({}.items())\n\n self.assert_usual_repr(d1.keys())\n self.assert_usual_repr(d1.values())\n self.assert_usual_repr(d1.items())\n\n self.assert_cheap_repr(d2.keys(),\n 'dict_keys([0, 1, 2, 3, 4, 5, ...])')\n self.assert_cheap_repr(d2.values(),\n 'dict_values([0, 2, 4, 6, 8, 10, ...])')\n self.assert_cheap_repr(d2.items(),\n 'dict_items([(0, 0), (1, 2), (2, 4), (3, 6), ...])')\n\n def test_defaultdict(self):\n d = defaultdict(int)\n self.assert_usual_repr(d)\n d.update({1: 2, 2: 3, 3: 4})\n self.assert_usual_repr(d)\n d.update(dict((x, x * 2) for x in range(10)))\n self.assert_cheap_repr(d, \"defaultdict(, {0: 0, 1: 2, 2: 4, 3: 6, ...})\")\n\n def test_deque(self):\n self.assert_usual_repr(deque())\n self.assert_usual_repr(deque([1, 2, 3]))\n self.assert_cheap_repr(deque(range(10)), 'deque([0, 1, 2, 3, 4, 5, ...])')\n\n def test_ordered_dict(self):\n self.assert_usual_repr(OrderedDict())\n self.assert_usual_repr(OrderedDict((x, 
x * 2) for x in range(3)))\n self.assert_cheap_repr(OrderedDict((x, x * 2) for x in range(10)),\n 'OrderedDict([(0, 0), (1, 2), (2, 4), (3, 6), ...])')\n\n def test_counter(self):\n self.assert_usual_repr(Counter())\n self.assert_cheap_repr_evals('Counter({0: 0, 2: 1, 4: 2})')\n self.assert_cheap_repr(Counter(dict((x * 2, x) for x in range(10))),\n 'Counter(10 keys)')\n\n def test_array(self):\n self.assert_usual_repr(array('l', []))\n self.assert_usual_repr(array('l', [1, 2, 3, 4, 5]))\n self.assert_cheap_repr(array('l', range(10)),\n \"array('l', [0, 1, 2, 3, 4, ...])\")\n\n def test_numpy_array(self):\n self.assert_usual_repr(numpy.array([]))\n self.assert_usual_repr(numpy.array([1, 2, 3, 4, 5]))\n self.assert_cheap_repr(numpy.array(range(10)),\n 'array([0, 1, 2, 3, 4, 5, ...])')\n\n def test_bytes(self):\n self.assert_usual_repr(b'')\n self.assert_usual_repr(b'123')\n self.assert_cheap_repr(b'abc' * 50,\n \"b'abcabcabcabcabcabcabcabcabca...bcabcabcabcabcabcabcabcabcabc'\")\n\n def test_str(self):\n self.assert_usual_repr('')\n self.assert_usual_repr(u'')\n self.assert_usual_repr(u'123')\n self.assert_usual_repr('123')\n self.assert_cheap_repr('abc' * 50,\n \"'abcabcabcabcabcabcabcabcabca...bcabcabcabcabcabcabcabcabcabc'\")\n\n def test_django_queryset(self):\n self.assert_cheap_repr(ContentType.objects.all(),\n '')\n\n def test_inheritance(self):\n class A(object):\n def __init__(self):\n pass\n\n class B(A):\n pass\n\n class C(A):\n pass\n\n class D(C):\n pass\n\n class C2(C):\n pass\n\n class C3(C, B):\n pass\n\n class B2(B, C):\n pass\n\n class A2(A):\n pass\n\n @register_repr(A)\n def repr_A(_x, _helper):\n return 'A'\n\n @register_repr(C)\n def repr_C(_x, _helper):\n return 'C'\n\n @register_repr(B)\n def repr_B(_x, _helper):\n return 'B'\n\n @register_repr(D)\n def repr_D(_x, _helper):\n return 'D'\n\n self.assert_cheap_repr(A(), 'A')\n self.assert_cheap_repr(B(), 'B')\n self.assert_cheap_repr(C(), 'C')\n self.assert_cheap_repr(D(), 'D')\n self.assert_cheap_repr(C2(), 'C')\n self.assert_cheap_repr(C3(), 'C')\n self.assert_cheap_repr(B2(), 'B')\n self.assert_cheap_repr(A2(), 'A')\n\n self.assertEqual(find_repr_function(A), repr_A)\n self.assertEqual(find_repr_function(B), repr_B)\n self.assertEqual(find_repr_function(C), repr_C)\n self.assertEqual(find_repr_function(D), repr_D)\n self.assertEqual(find_repr_function(C2), repr_C)\n self.assertEqual(find_repr_function(C3), repr_C)\n self.assertEqual(find_repr_function(B2), repr_B)\n self.assertEqual(find_repr_function(A2), repr_A)\n\n def test_exceptions(self):\n with temp_attrs(cheap_repr, 'raise_exceptions', True):\n with self.assertRaises(ValueError):\n cheap_repr(ErrorClass(True))\n\n for C in [ErrorClass, OldStyleErrorClass]:\n name = C.__name__\n self.assert_usual_repr(C())\n self.assert_cheap_repr_warns(\n C(True),\n \"Exception 'ValueError' in repr_object for object of type %s. 
\"\n \"The repr has been suppressed for this type.\" % name,\n '<%s instance at 0xXXX (exception in repr)>' % name,\n )\n self.assert_cheap_repr(C(), '<%s instance at 0xXXX (repr suppressed)>' % name)\n for C in [ErrorClassChild, OldStyleErrorClassChild]:\n name = C.__name__\n self.assert_cheap_repr(C(), '<%s instance at 0xXXX (repr suppressed)>' % name)\n\n def test_func_raise_exceptions(self):\n class T(object):\n pass\n\n @register_repr(T)\n def bad_repr(*_):\n raise TypeError()\n\n bad_repr.raise_exceptions = True\n\n with self.assertRaises(TypeError):\n cheap_repr(T())\n\n class X(object):\n def __repr__(self):\n raise IOError()\n\n class Y: # old-style in python 2\n def __repr__(self):\n raise IOError()\n\n raise_exceptions_from_default_repr()\n\n for C in [X, Y]:\n with self.assertRaises(IOError):\n cheap_repr(C())\n\n def test_default_too_long(self):\n self.assert_usual_repr(DirectRepr('hello'))\n self.assert_cheap_repr_warns(\n DirectRepr('long' * 500),\n 'DirectRepr.__repr__ is too long and has been suppressed. '\n 'Register a repr for the class to avoid this warning '\n 'and see an informative repr again, '\n 'or increase cheap_repr.suppression_threshold',\n 'longlonglonglonglonglonglong...glonglonglonglonglonglonglong')\n self.assert_cheap_repr(DirectRepr('hello'),\n '')\n\n def test_maxparts(self):\n self.assert_cheap_repr(list(range(8)),\n '[0, 1, 2, 3, 4, 5, ...]')\n self.assert_cheap_repr(list(range(20)),\n '[0, 1, 2, 3, 4, 5, ...]')\n with temp_attrs(find_repr_function(list), 'maxparts', 10):\n self.assert_cheap_repr(list(range(8)),\n '[0, 1, 2, 3, 4, 5, 6, 7]')\n self.assert_cheap_repr(list(range(20)),\n '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ...]')\n\n def test_recursive(self):\n lst = [1, 2, 3]\n lst.append(lst)\n self.assert_cheap_repr(lst, '[1, 2, 3, [1, 2, 3, [1, 2, 3, [...]]]]')\n\n d = {1: 2, 3: 4}\n d[5] = d\n self.assert_cheap_repr(\n d, '{1: 2, 3: 4, 5: {1: 2, 3: 4, 5: {1: 2, 3: 4, 5: {...}}}}')\n\n def test_custom_set(self):\n self.assert_cheap_repr(RangeSet(0), 'RangeSet()')\n self.assert_cheap_repr(RangeSet(3), 'RangeSet({0, 1, 2})')\n self.assert_cheap_repr(RangeSet(10), 'RangeSet({0, 1, 2, 3, 4, 5, ...})')\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":12208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"284016718","text":"\"\"\"Simulate a Measurement Set.\"\"\"\n\nimport subprocess\nimport argparse\nimport ConfigParser\n\nclass EqualsSpaceRemover:\n output_file = None\n def __init__( self, new_output_file ):\n self.output_file = new_output_file\n\n def write( self, what ):\n self.output_file.write( what.replace( \" = \", \"=\", 1 ) )\n\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Simulation script.',\n epilog='')\n parser.add_argument('--inifile', dest='ini_file', type=str , default='./config/1_0', help='INI config file.')\n\n parser.add_argument('--outputmsfile', dest='output_msfile', type=str , default='./config/1_0.ms', help='output Measuremnet Set file.')\n \n\n args = parser.parse_args()\n \n ini_file = args.ini_file\n\n output_msfile = args.output_msfile\n \n config = ConfigParser.RawConfigParser()\n\n config.read(ini_file)\n\n if output_msfile == None :\n raise NameError\n else:\n config.set('interferometer', 'ms_filename', output_msfile)\n with open(ini_file, 'w+') as configfile:\n config.write(EqualsSpaceRemover(configfile)) \n 
subprocess.call([\"/BIGDATA1/ac_shao_tan_1/OSKAR/OSKAR-2.7/bin/oskar_sim_interferometer\", ini_file])\n \n","sub_path":"test/OSKAR_CASA/MPI/run_interferometer.py","file_name":"run_interferometer.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"495151262","text":"#!/usr/bin/env python3\n# 5-11\nending=''\nordinals = [number for number in range(1, 10)]\nfor ordinal in ordinals:\n if ordinal == 1:\n ending = 'st'\n elif ordinal == 2:\n ending = 'nd'\n elif ordinal == 3:\n ending = 'rd'\n else:\n ending = 'th'\n print(str(ordinal) + ending)","sub_path":"chapter5/ordinals.py","file_name":"ordinals.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"461844119","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nos.chdir(\"/Users/tomoyuki/Desktop/keras_test\")\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom datetime import datetime\nfrom copy import deepcopy\nfrom tqdm import tqdm\nimport time\nimport shutil\nfrom sklearn.linear_model import LinearRegression as lm\nfrom sklearn.neighbors import KNeighborsRegressor as knn\n\nfrom scripts import make_data_funcs\nfrom scripts import ARIMA_funcs\n\nimport scripts.model as model\n\n\n# データ読み込み\ntrack = \"A\"\ndf_irregularity = pd.read_csv(f\"input/irregularity_{track}.csv\")\ndf_irregularity_phase_modified = pd.read_csv(f\"input/irregularity_{track}_phase_modified.csv\")\n\ndf_irregularity_phase_modified.head()\ndf_irregularity_phase_modified.tail()\ndf_irregularity_phase_modified.shape\n\n\n\n\n## スモールデータ =====================================\n# 訓練データ,評価データの設定\n#target_milage_id_list = range(6935,6945)\n#target_milage_id_list = range(6900,6970)\n#target_milage_id_list = range(6800,7200)\n#target_milage_id_list = range(6970,7100)\n#target_milage_id_list = range(1700,1730)#1723\ntarget_milage_id_list = range(8580,8600)#1723\n\n\nif track==\"A\":\n t_pred = 41#91\n start_date_id=150 # start_date_id日目の原系列,差分系列を初期値とする=>start_date_id+1日目から予測\n train_date_id_list = list(range(0, 200)) # track A\n lag_t = 0\n start_raw_file_name = f\"irregularity_{track}.csv\"\n \nelif track==\"B\":\n t_pred = 41#91\n start_date_id=100\n #train_date_id_list = list(range(0, 280)) # track B\n #train_date_id_list = list(range(300,360))\n train_date_id_list = list(range(0,100))\n #train_date_id_list = list(range(150, 280))\n lag_t = 12\n t_pred = 91\n start_raw_file_name = f\"irregularity_{track}.csv\"\n \n \nelif track==\"C\":\n t_pred = 41#91\n start_date_id=150\n #train_date_id_list = list(range(0, 220)) # track C\n train_date_id_list = list(range(280, 365)) # track C\n lag_t = 0\n start_raw_file_name = f\"irregularity_{track}.csv\"\n \nelif track==\"D\":\n t_pred = 41#91\n start_date_id=10\n# train_date_id_list = list(range(0, 250)) # track D\n train_date_id_list = list(range(140, 240))\n lag_t = 0\n start_raw_file_name = f\"irregularity_{track}.csv\"\n\nstart_date_id = start_date_id -1 - lag_t\ntest_date_id_list = range(start_date_id+1+lag_t, start_date_id+1+lag_t+t_pred)\n\n\n\n# 前処理(原系列)の設定\ntol_sigma_raw_prior = 2.5\nwindow=50\nmin_periods=3\ncenter=True\n\n# 前処理(差分系列)の設定\ntol_sigma_diff_prior = 2.0\nwindow_diff=3\nmin_periods_diff=1\ncenter_diff=True\n\n# 前処理(初期値)の設定\nstart_period = 30\nn_average_date = 5\nstart_average_method = \"mean\"#\"median\"\n\n\n# 予測モデルの設定\nmodel_name_pred = 
\"lm\" #\"SVR\"\nn_diff = 3\n\n## 後処理(予測結果修正)の設定\ntol_abnormal_max_min = 2.5\ntol_abnormal_upper = 25\ntol_abnormal_lower = -25\n\n\n## スモールデータ取得=========================================\ndf_irregularity_small = deepcopy(df_irregularity.iloc[:,target_milage_id_list])\ndf_irregularity_phase_modified_small = deepcopy(df_irregularity_phase_modified.iloc[:,target_milage_id_list])\n\n\n\n## 8700 ~見直し\n\n## 前処理 ==================================================================================\nprint(\"\\n・前処理 ===============================\")\ntime.sleep(0.5)\n\n# 0時点の原系列,差分系列をまとめる\norg_dict = {}\norg_dict[\"raw0\"] = deepcopy(df_irregularity_phase_modified_small)\n\n# 原系列の前処理:移動平均\norg_dict[\"raw0_prior_treated\"], tmp_raw0_median, tmp_raw0_median_diff = make_data_funcs.priorRawData(df_raw=org_dict[\"raw0\"], window=window, min_periods=min_periods, center=center, tol_diff=0.7, tol_n_group=5)\n\n\nfolder_name = \"movie\"\nmake_data_funcs.makeNewFolder(folder_name)\norg_dict[\"raw0\"].to_csv(f\"{folder_name}/raw0.csv\",index=False,header=True)\ntmp_raw0_median.to_csv(f\"{folder_name}/tmp_raw0_median.csv\",index=False,header=True)\ntmp_raw0_median_diff.to_csv(f\"{folder_name}/diff_prior_treated.csv\",index=False,header=True)\norg_dict[\"raw0_prior_treated\"].to_csv(f\"{folder_name}/raw0_prior_treated.csv\",index=False,header=True)\n\n\n\n\n# 差分系列の前処理:絶対値がmu+sigma*tol_sigma超過のデータをNaNに変更\norg_dict[\"diff0\"] = make_data_funcs.priorDiffData(org_df_raw=org_dict[\"raw0\"], df_raw=deepcopy(org_dict[\"raw0_prior_treated\"]), n_diff=n_diff, tol_sigma=tol_sigma_diff_prior, window=window_diff, min_periods=min_periods_diff, center=center_diff)\n \n\n# n_diff+1期分の差分系列をまとめる\nfor i in range(n_diff):\n org_dict[f\"diff{i+1}\"] = org_dict[\"diff0\"].shift(i+1)\n\n\n\n## 初期値の準備\nprint(\"\\n原系列初期値を取得\")\ntime.sleep(0.5)\nstart_raw_dict = make_data_funcs.makeStartRawDict(df_raw=df_irregularity_small, start_date_id=start_date_id, start_period=start_period, n_average_date=n_average_date, start_average_method=start_average_method)\n\n\nprint(\"\\n差分系列初期値を取得\")\ntime.sleep(0.5)\nstart_diff_dict, start_values_result_dict = make_data_funcs.makeStartDiffDict(df_dict=org_dict, n_diff=n_diff, start_date_id=start_date_id)\n\n\n\n\n\n## 予測モデル作成・逐次予測===============================================================\n# 訓練データを取得 \ntrain_dict = {}\nfor key in list(org_dict.keys()):\n train_dict[key] = deepcopy(org_dict[key].iloc[train_date_id_list,:])\n \n\nprint(\"\\n・オリジナル原系列の訓練データ範囲のデータ数調査===============================\")\ntime.sleep(0.5)\nn_org_train_dict = {}\nfor milage in tqdm(list(org_dict[\"raw0\"].columns)):\n n_org_train_dict[milage] = org_dict[\"raw0\"].iloc[train_date_id_list,:][milage].dropna().shape[0]\n\n\n## ARIMA(n_diff,1,0)による逐次予測\nprint(\"\\n・予測モデル作成・逐次予測 ===============================\")\nprint(\"ARIMA\")\ntime.sleep(0.5)\ndf_pred_raw_lm = ARIMA_funcs.predWithARIMA(train_dict=train_dict, start_raw_dict=start_raw_dict, start_diff_dict=start_diff_dict, n_diff=n_diff, start_date_id=start_date_id, t_pred=t_pred+lag_t, model_name=\"lm\", n_org_train_dict=n_org_train_dict)\n\n\nprint(\"直近5日間中央値\")\ntime.sleep(0.5)\ndf_pred_raw_mean = ARIMA_funcs.predWithARIMA(train_dict=train_dict, start_raw_dict=start_raw_dict, start_diff_dict=start_diff_dict, n_diff=n_diff, start_date_id=start_date_id, t_pred=t_pred+lag_t, model_name=\"median\", n_org_train_dict=n_org_train_dict)\n\n\n## 後処理 ==================================================================================\nprint(\"\\n・後処理 
===============================\")\ntime.sleep(0.5)\nabnormal_total, diagnosis_result = ARIMA_funcs.diagnosePredResult(df_pred=deepcopy(df_pred_raw_lm), df_train=deepcopy(train_dict[\"raw0_prior_treated\"]), tol_abnormal_max_min = tol_abnormal_max_min, tol_abnormal_upper = tol_abnormal_upper, tol_abnormal_lower = tol_abnormal_lower)\ndf_pred_raw_lm = ARIMA_funcs.postTreat(df_pred_raw=df_pred_raw_lm, abnormal_total=abnormal_total, start_raw_dict=start_raw_dict, t_pred=t_pred+lag_t)\ndf_pred_raw_lm = df_pred_raw_lm.iloc[range(lag_t, t_pred+lag_t),:]\ndf_pred_raw_mean = df_pred_raw_mean.iloc[range(lag_t, t_pred+lag_t),:]\n\n\n\n## 結果 =============================================================\n# 評価データを取得 \ntest_dict = {}\nfor key in list(org_dict.keys()):\n test_dict[key] = deepcopy(org_dict[key].iloc[test_date_id_list,:])\n\n# MAE計算・プロット\nprint(\"MAE lm\")\nmae_dict_lm = {}\nfor milage in list(df_pred_raw_lm.columns):\n mae_dict_lm[milage] = ARIMA_funcs.calcMAE(df_truth=test_dict[\"raw0\"][milage], df_pred=df_pred_raw_lm[milage])\nARIMA_funcs.plotTotalMAE(mae_dict=mae_dict_lm, ylim=[0.0, 1.0], r_plot_size=1, output_dir=f\"ARIMA_{{track}}_lm\")\n\nprint(\"MAE mean\")\nmae_dict_mean = {}\nfor milage in list(df_pred_raw_mean.columns):\n mae_dict_mean[milage] = ARIMA_funcs.calcMAE(df_truth=test_dict[\"raw0\"][milage], df_pred=df_pred_raw_mean[milage])\nARIMA_funcs.plotTotalMAE(mae_dict=mae_dict_mean, ylim=[0.0, 1.0], r_plot_size=1, output_dir=f\"ARIMA_{{track}}_mean\")\n\n\n# lmとmeanのMAEの差分を計算・プロット(-の値の部分でlmが優っている)\ndiff_mae = np.array(list(mae_dict_lm.values())) - np.array(list(mae_dict_mean.values()))\nplt.plot(diff_mae, color=\"red\");plt.ylim([-0.1, 0.1]);plt.grid();plt.show()\n\n\n\nfolder_name = \"pred_result_movie\"\nmake_data_funcs.makeNewFolder(folder_name)\n\ntrain_dict[\"raw0\"].to_csv(f\"{folder_name}/train.csv\",index=True,header=True)\ntest_dict[\"raw0\"].to_csv(f\"{folder_name}/test.csv\",index=True,header=True)\ndf_pred_raw_lm.to_csv(f\"{folder_name}/pred_ARIMA.csv\",index=True,header=True)\ndf_pred_raw_mean.to_csv(f\"{folder_name}/pred_mean.csv\",index=True,header=True)\n\npd.DataFrame({\"milage\":list(mae_dict_lm.keys()), \"MAE\":list(mae_dict_lm.values())}).to_csv(f\"{folder_name}/MAE_ARIMA.csv\",index=False,header=True)\npd.DataFrame({\"milage\":list(mae_dict_mean.keys()), \"MAE\":list(mae_dict_mean.values())}).to_csv(f\"{folder_name}/MAE_mean.csv\",index=False,header=True)\n\n\n\n\n\n# 結果をプロット\nmilage_id_list = range(0,1)\nylim=[-10,10]\nfor milage_id in milage_id_list:\n milage = list(df_pred_raw_lm.columns)[milage_id]\n print(f\"\\n{milage_id} {milage} ======================================================\\n\")\n print(\"lm\")\n \n ARIMA_funcs.PlotTruthPred(df_train=train_dict[\"raw0\"][milage], df_truth=test_dict[\"raw0\"][milage], df_pred=df_pred_raw_lm[milage], inspects_dict=None, \n ylim=ylim, r_plot_size=1,output_dir=f\"ARIMA_{track}_lm\", file_name=f\"{milage_id}_{milage}\")\n \n print(\"mean\")\n ARIMA_funcs.PlotTruthPred(df_train=train_dict[\"raw0\"][milage], df_truth=test_dict[\"raw0\"][milage], df_pred=df_pred_raw_mean[milage], inspects_dict=None, \n ylim=ylim, r_plot_size=1,output_dir=f\"ARIMA_{track}_mean\", file_name=f\"{milage_id}_{milage}\")\n \n \n\n\n\n \n### ニューラルネットワーク ========================================\n## dfから入力データ作成\n#y, X = model.dfDict2SAMInput(df_diff=df_spatio_diff)\n#\n## spatialARIモデル作成\n#spatialARIModel = model.spatialARIModel(input_shape=(X.shape[1],X.shape[2]))\n#spatialARIModel.summary()\n#\n## 学習\n#model.fit(x=X, y=y, 
batch_size=10, epochs=10, verbose=1)\n#model.get_weights()\n#\n#\n\n\n","sub_path":"python/keras_test/old/scripts_20190106/old/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":10278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"91834316","text":"import sys, os, platform\n\n#This function is used to print runtime process and error messages to console and log file\n#If the testcase is run on linux, it will only print the runtime process to console and will not create any log files\n\nclass Logger(object):\n\tdef __init__(self):\n\t\tself.terminal = sys.stdout\n\t\tfilename = str.split(sys.argv[0],'.') #get scriptname and split by '.' delimiter\n\t\tself.log = open(\"logs/\" + filename[0] + '.log' , \"w\")\n\n\tdef write(self, message):\n\t\tif platform.system() == 'Windows':\n\t\t\tself.terminal.write(message)\n\t\t\tself.log.write(message)\n\t\telif platform.system() == 'Linux':\n\t\t\tself.terminal.write(message)\n","sub_path":"utils/function/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"602213751","text":"\"\"\"Django signal handlers for relaydomains.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db.models import signals\nfrom django.dispatch import receiver\nfrom django.template import Template, Context\nfrom django.utils.translation import ugettext as _\n\nfrom modoboa.admin import models as admin_models\nfrom modoboa.admin import signals as admin_signals\nfrom modoboa.core import signals as core_signals\nfrom modoboa.lib.email_utils import split_mailbox\n\nfrom . import constants\nfrom . import forms\nfrom . import lib\nfrom . import models\nfrom . 
import postfix_maps\n\n\n@receiver(admin_signals.use_external_recipients)\ndef check_relaydomain_alias(sender, **kwargs):\n \"\"\"Allow the creation of an alias on a relaydomain.\"\"\"\n recipient = kwargs.get(\"recipients\")\n if not recipient:\n return\n localpart, domain = split_mailbox(recipient)\n if not (models.RelayDomain.objects.select_related().filter(\n domain__name=domain).exists()):\n return False\n if (admin_models.Mailbox.objects.select_related(\"domain\").filter(\n domain__name=domain, address=localpart).exists()):\n return False\n return True\n\n\n@receiver(signals.post_save, sender=admin_models.Domain)\ndef clean_domain(sender, instance, **kwargs):\n \"\"\"Remove or create RelayDomain record if needed.\"\"\"\n if kwargs.get(\"created\"):\n return\n if instance.type == \"domain\":\n try:\n instance.relaydomain.delete()\n except models.RelayDomain.DoesNotExist:\n pass\n else:\n # Make sure to create a RelayDomain instance since we can't do it\n # at form level...\n models.RelayDomain.objects.get_or_create(\n domain=instance,\n defaults={\"service\": models.Service.objects.first()}\n )\n\n\n@receiver(core_signals.register_postfix_maps)\ndef register_postfix_maps(sender, **kwargs):\n \"\"\"Register postfix maps.\"\"\"\n return [\n postfix_maps.RelayDomainsMap,\n postfix_maps.RelayDomainsTransportMap,\n postfix_maps.SplitedDomainsTransportMap,\n postfix_maps.RelayRecipientVerification\n ]\n\n\n@receiver(core_signals.extra_role_permissions)\ndef extra_role_permissions(sender, role, **kwargs):\n \"\"\"Add permissions to the Resellers group.\"\"\"\n return constants.PERMISSIONS.get(role, [])\n\n\n@receiver(core_signals.extra_static_content)\ndef static_content(sender, caller, st_type, user, **kwargs):\n \"\"\"Add extra static content.\"\"\"\n if caller != \"domains\" or st_type != \"js\":\n return \"\"\n\n t = Template(\"\"\"\n\n\"\"\")\n return t.render(Context({\"STATIC_URL\": settings.STATIC_URL}))\n\n\n@receiver(admin_signals.extra_domain_filters)\ndef extra_domain_filters(sender, **kwargs):\n \"\"\"Return relaydomain filters.\"\"\"\n return [\"srvfilter\"]\n\n\n@receiver(admin_signals.extra_domain_forms)\ndef extra_domain_form(sender, user, **kwargs):\n \"\"\"Return relay settings for domain edition.\"\"\"\n if not user.has_perm(\"relaydomains.change_relaydomain\"):\n return []\n domain = kwargs.get(\"domain\")\n if not domain or domain.type != \"relaydomain\":\n return []\n return [{\n \"id\": \"relaydomain\", \"title\": _(\"Relay settings\"),\n \"cls\": forms.RelayDomainFormGeneral,\n \"formtpl\": \"relaydomains/relaydomain_form.html\"\n }]\n\n\n@receiver(admin_signals.get_domain_form_instances)\ndef fill_domain_instances(sender, user, domain, **kwargs):\n \"\"\"Fill the relaydomain form with the right instance.\"\"\"\n condition = (\n not user.has_perm(\"relaydomains.change_relaydomain\") or\n domain.type != \"relaydomain\"\n )\n if condition:\n return {}\n return {\"relaydomain\": domain.relaydomain}\n\n\n@receiver(admin_signals.extra_domain_qset_filters)\ndef extra_domain_entries(sender, domfilter, extrafilters, **kwargs):\n \"\"\"Return extra queryset filters.\"\"\"\n if domfilter is not None and domfilter and domfilter != \"relaydomain\":\n return {}\n if \"srvfilter\" in extrafilters and extrafilters[\"srvfilter\"]:\n return {\"relaydomain__service__name\": extrafilters[\"srvfilter\"]}\n return {}\n\n\n@receiver(admin_signals.extra_domain_types)\ndef extra_domain_types(sender, **kwargs):\n \"\"\"Declare the relay domain type.\"\"\"\n return [(\"relaydomain\", 
_(\"Relay domain\"))]\n\n\n@receiver(admin_signals.extra_domain_wizard_steps)\ndef extra_wizard_step(sender, **kwargs):\n \"\"\"Return a step to configure the relay settings.\"\"\"\n return [forms.RelayDomainWizardStep(\n \"relay\", forms.RelayDomainFormGeneral, _(\"Relay domain\"),\n \"relaydomains/relaydomain_form.html\"\n )]\n\n\n@receiver(admin_signals.get_domain_tags)\ndef get_tags_for_domain(sender, domain, **kwargs):\n \"\"\"Return relay domain custom tags.\"\"\"\n if domain.type != \"relaydomain\":\n return []\n return domain.relaydomain.tags\n\n\n@receiver(admin_signals.import_object)\ndef get_import_func(sender, objtype, **kwargs):\n \"\"\"Return function used to import objtype.\"\"\"\n if objtype == \"relaydomain\":\n return lib.import_relaydomain\n return None\n","sub_path":"modoboa/relaydomains/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":5260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"566936755","text":"'''\nProject 1 - CS 457\nSimple Database and Queries\nCreated By Joshua Hosea at UNR\n'''\n\nimport sys\n\nimport sql_commands as sql\n\ndef run_input_file(input_file):\n '''\n This function reads an input file and splits the commands on new line characters.\n It will loop through all commands and execute them in order.\n\n Returns: 'All commands executed.'\n '''\n with open(sys.argv[1],'r') as f:\n sql_file = f.read()\n\n # Replaces new lines with spaces and adds a new line after semicolon, then splits using new line character\n sql_array = sql_file.replace('\\n',' ')\n sql_array = sql_array.replace(';',';\\n')\n sql_array = sql_array.split('\\n')\n # Removes leading and trailing white space\n sql_array = [x.strip() for x in sql_array]\n sql_array = [x for x in sql_array if x != '']\n\n database = ''\n\n for command in sql_array:\n print(f'Command entered: {command}')\n\n try:\n database = sql.execute_command(command, database)\n\n # Prints exeption if command isn't exit and continues\n except sql.Invalid_Command as ex:\n print(ex)\n\n print('All commands executed.')\n\n return None\n\n\ndef run_standard_input():\n '''\n This function reads one line for the input command and executes the command.\n\n Returns: None\n '''\n command = ''\n database = ''\n\n while(command != 'exit'):\n\n # Continues accepting input until a semicolon is inputted or the input is exit\n command = ''\n while(';' not in command and command != 'exit'):\n new_line = input('--> ')\n command += ' ' + new_line\n command = command.strip()\n\n try:\n database = sql.execute_command(command, database)\n\n # Prints exeption if command isn't exit and continues\n except sql.Invalid_Command as ex:\n if command != 'exit':\n print(ex)\n\n return None\n\n\nif __name__ == '__main__':\n\n # If a file was specified in the command line, we will run the program in file mode\n if len(sys.argv) > 1:\n run_input_file(sys.argv)\n\n # Else use standard input\n else:\n run_standard_input()\n","sub_path":"manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"201729593","text":"# Modules\nimport random\n# Sets up the initial value of parents.\nparents = int(input(\"Enter a starting amount for the parents: \"))\n# Sets the amount of offspring to 0.\noffSpring = 0\n\n# A loop syntax that continues the process until no parents remain.\nwhile parents > 0:\n # The factors determine if the coin flip is heads or 
tails,\n # generating a number from 0-1 (0 tails, 1 heads).\n factor1 = random.randrange(0, 2)\n factor2 = random.randrange(0, 2)\n # print(factor1, factor2) # DO NOT USE (DEBUG)\n # Coin flip = tails, tails\n if factor1 == 0 and factor2 == 0:\n # Removes a single parent from the cycle.\n parents -= 1\n # Coin flip = heads, tails\n elif factor1 == 1 and factor2 == 0:\n # Removes one parent and adds two offspring\n parents -= 1\n offSpring += 2\n # Coin flip = tails, heads\n elif factor1 == 0 and factor2 == 1:\n # Removes one parent and adds two offspring\n parents -= 1\n offSpring += 2\n # Coin flip = heads, heads\n else:\n # Nothing occurs.\n parents += 0\n\n# Prints the offspring left after the generation is finished.\nprint(\"Offspring: {}\".format(offSpring))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"368782545","text":"from XenGarden.VBD import VBD\n\nfrom API.v1.VDI.serialize import serialize as _vdi_serialize\nfrom API.v1.VM.serialize import serialize as _vm_serialize\n\n\ndef serialize(vbd: VBD):\n vm = vbd.get_VM()\n vdi = vbd.get_VDI()\n\n if vm is not None:\n vm = _vm_serialize(vm)\n\n if vdi is not None:\n vdi = _vdi_serialize(vdi)\n\n return dict(\n vm=vm,\n vdi=vdi,\n bootable=vbd.get_bootable(),\n attached=vbd.get_currently_attached(),\n unpluggable=vbd.get_unpluggable(),\n device=vbd.get_device(),\n type=vbd.get_type(),\n uuid=vbd.get_uuid(),\n mode=vbd.get_mode(),\n )\n","sub_path":"API/v1/VBD/serialize.py","file_name":"serialize.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"94562114","text":"import matplotlib\nmatplotlib.use(\"Agg\")\nimport numpy\nimport os\nmy_home = os.popen(\"echo $HOME\").readlines()[0][:-1]\nimport matplotlib.pyplot as plt\nfrom sys import path,argv\npath.append('%s/work/mylib/'%my_home)\npath.append(\"E:/Github/astrophy-research/mylib/\")\nfrom plot_tool import Image_Plot\nimport tool_box\nimport h5py\n\n\n# plot the GGL signal\n# \"python .. cmass plot 1\" or \"python .. 
cmass calculate 1 3 4\n\nargc = len(argv)\narea_num = argc - 3\n# foreground\nfore_source = argv[1]\n# \"calculate\" or \"plot\"\ncmd = argv[2]\n\nparent_path = \"/mnt/perc/hklee/CFHT/gg_lensing/\"\nparent_result_path = parent_path + \"result/%s/cfht/\"%fore_source\n\nh5f = h5py.File(parent_result_path + \"w_%s/radius_0.hdf5\"%argv[3], \"r\")\nradius_bin = h5f[\"/radius_bin\"].value[:,0]\nh5f.close()\nradius_num = radius_bin.shape[0]-1\n\n# radius_num = 11\n# radius_bin = tool_box.set_bin_log(0.1, 15, radius_num+1)\n\n\nif area_num > 1:\n result_path = parent_result_path + \"result/%s_result_total.hdf5\"%fore_source\n result_path_npz = parent_result_path + \"result/%s_result_total.npz\"%fore_source\n dens_pic_path = parent_result_path + \"result/%s_total\"%fore_source\n dens_r_pic_path = parent_result_path + \"result/%s_total_sgimaxr\"%fore_source\nelse:\n result_path = parent_result_path + \"result/%s_result_w_%d.hdf5\"%(fore_source, int(argv[3]))\n result_path_npz = parent_result_path + \"result/%s_result_w_%d.npz\"%(fore_source, int(argv[3]))\n dens_pic_path = parent_result_path + \"result/%s_w_%d\"%(fore_source, int(argv[3]))\n dens_r_pic_path = parent_result_path + \"result/%s_w_%d_sigmaxr\"%(fore_source, int(argv[3]))\n\nylabels = [\"$\\gamma$\", \"$\\Delta\\Sigma \\; [\\\\rm{h \\cdot M_{\\odot}} \\cdot \\\\rm{pc^{-2}}]$\"]\nylabels_r = \"$\\\\rm{R}\\Delta\\Sigma \\; [\\\\rm{10^6\\ M_{\\odot}} \\cdot \\\\rm{pc^{-2}}]$\"\nxlabel = \"$\\\\rm{R} \\; [\\\\rm{Mpc} \\cdot \\\\rm{h^{-1}}]$\"\n\ncoeff = 554.682135528\n# the catalog contains\n# e_t, e_x, m, c, weight, crit_density,\n# integrate part of crit_density, transverse distance and redshift of background\n\n# result label\ncrit_t_lb, crit_t_sig_lb = 0, 1\ncrit_x_lb, crit_x_sig_lb = 2, 3\ntrans_dist_lb = 4\n\nif cmd == \"calculate\":\n result = numpy.zeros((5, radius_num))\n for ir in range(radius_num):\n\n stack_count = 0\n for ia in range(3, argc):\n h5f = h5py.File(parent_result_path + \"w_%d/radius_%d.hdf5\"%(int(argv[ia]), ir), \"r\")\n try:\n sub_num = h5f[\"/subset_num\"].value[0,0]\n\n for i_sub in range(sub_num):\n temp = h5f[\"/pair_data_%d\"%i_sub].value\n\n if temp.shape[0] > 0:\n if stack_count == 0:\n data = temp\n else:\n data = numpy.row_stack((data, temp))\n stack_count += 1\n except:\n pass\n h5f.close()\n\n if stack_count > 0:\n pair_num = data.shape[0]\n\n e_t = data[:, 0]\n e_x = data[:, 1]\n # m bias\n m_bias = data[:, 2]\n # weight\n weight_measure = data[:, 3]\n crit_integ = data[:, 4]\n crit = data[:, 5]\n # radius from the center\n dist = data[:, 6]\n redshif = data[:, 7]\n\n weight = weight_measure/crit_integ**2\n weight_sum = weight.sum()\n # weight_bias = weight * m_bias\n # weight_sum = tool_box.accurate_sum(weight, 10000)\n\n # two kinds of correction\n corr_m = 1 + numpy.sum(weight * m_bias)/weight_sum\n # corr_m = 1 + tool_box.accurate_sum(weight_bias, 10000)/weight_sum\n # corr_m = 1 + m_bias.mean()\n\n delta_crit_et = e_t*crit_integ*coeff*weight\n delta_crit_ex = e_x*crit_integ*coeff*weight\n\n delta_sigma_t = numpy.sum(delta_crit_et)/weight_sum/corr_m\n delta_sigma_x = numpy.sum(delta_crit_ex)/weight_sum/corr_m\n # delta_sigma_t = tool_box.accurate_sum(delta_crit_et, 10000)/weight_sum/corr_m\n # delta_sigma_x = tool_box.accurate_sum(delta_crit_ex, 10000)/weight_sum/corr_m\n\n # gamma_t = numpy.sum(weight*e_t)/weight_sum/corr_m\n # gamma_x = numpy.sum(weight*e_x)/weight_sum/corr_m\n\n r_mean = dist.mean()\n # r_mean = tool_box.accurate_sum(dist, 1000)/dist.shape[0]\n\n result[crit_t_lb, ir] = 
delta_sigma_t\n result[crit_t_sig_lb, ir] = delta_crit_et.std()/numpy.sqrt(pair_num)\n result[crit_x_lb, ir] = delta_sigma_x\n result[crit_x_sig_lb, ir] = delta_crit_ex.std()/numpy.sqrt(pair_num)\n result[trans_dist_lb, ir] = r_mean\n\n print(\"[%.5f, %.5f], %d galaxy pairs at radius %f (%f). ESD: %.3f (%.3f)\"%(\n radius_bin[ir],radius_bin[ir+1], pair_num, r_mean,(radius_bin[ir]+radius_bin[ir+1])/2,result[crit_t_lb, ir],result[crit_t_sig_lb, ir]))\n else:\n print(\"Skip [%.5f, %.5f], 0 galaxy pairs\"%(radius_bin[ir],radius_bin[ir+1]))\n h5f = h5py.File(result_path,\"w\")\n h5f[\"/data\"] = result\n h5f.close()\n numpy.savez(result_path_npz, result)\n\n img = Image_Plot()\n img.set_style()\n img.subplots(1, 1)\n # img.axs[0][0].errorbar(result[r_lb], result[gt_lb], result[gt_lb + 1], c=\"C1\", capsize=4, label=\"T\", marker=\"s\")\n # img.axs[0][0].errorbar(result[r_lb], result[gx_lb], result[gx_lb + 1], c=\"C2\", capsize=4, label=\"X\", marker=\"s\")\n\n img.axs[0][0].errorbar(result[trans_dist_lb], result[crit_t_lb], result[crit_t_sig_lb],\n c=\"C1\", marker=\"s\", capsize=4, mfc=\"none\",fmt=\" \",label=\"T\")\n img.axs[0][0].errorbar(result[trans_dist_lb], result[crit_x_lb], result[crit_x_sig_lb + 1],\n c=\"C2\", marker=\"s\", capsize=4,mfc=\"none\", fmt=\" \",label=\"X\")\n\n y_max = img.axs[0][0].set_ylim()[1]\n ylims = (0.1, 8000)\n # plot the line extracted from the paper\n\n w1_cfht_path = \"/home/hklee/work/CFHT/gg_lensing/dens_cluster/data.dat\"\n if os.path.exists(w1_cfht_path):\n dens_data = numpy.loadtxt(w1_cfht_path)\n img.axs[0][0].errorbar(dens_data[0], dens_data[1], dens_data[2:4], marker=\"s\",\n c=\"k\", capsize=4, mfc=\"none\",fmt=\" \", label=\"Dens cluster\")\n\n img.set_label(0, 0, 0, ylabels[1])\n img.set_label(0, 0, 1, xlabel)\n\n img.axs[0][0].set_yscale(\"log\")\n img.axs[0][0].set_ylim(ylims)\n img.axs[0][0].set_xscale(\"log\")\n xs = img.axs[0][0].set_xlim()\n # img.axs[0][0].plot([xs[0], xs[1]], [0, 0], linestyle=\"--\", linewidth=1, c=\"grey\")\n img.set_legend(0,0,loc=\"upper right\")\n\n # for j in range(10):\n # img.axs[0][0].plot([xs[0], xs[1]], [j, j], linewidth=0.5, c=\"grey\", alpha=0.5)\n # img.axs[0][0].plot([xs[0], xs[1]], [10 + 10*j, 10 + 10*j], linewidth=0.5,c=\"grey\", alpha=0.5)\n # img.axs[0][0].plot([xs[0], xs[1]], [100 + 100*j, 100 + 100*j], linewidth=0.5,c=\"grey\", alpha=0.5)\n #\n # img.axs[0][0].set_xlim(xs[0], xs[1])\n\n img.save_img(dens_pic_path + \".png\")\n img.set_style_default()\n img.close_img()\n\n # # plot R x \\Delta\\Sigma\n # img = Image_Plot()\n # img.set_style()\n # img.subplots(1,1)\n # img.set_label(0, 0, 0, ylabels_r)\n # img.set_label(0, 0, 1, xlabel)\n # img.axs[0][0].errorbar(result[trans_dist_lb], result[sigtxr_lb], result[sigtxr_lb + 1], c=\"C1\", capsize=4, label=\"X\", marker=\"s\")\n # img.axs[0][0].set_xscale(\"log\")\n # img.save_img(dens_r_pic_path + \".png\")\n # img.set_style_default()\n # img.close_img()\n\nif cmd == \"plot\":\n\n h5f = h5py.File(result_path,\"r\")\n result = h5f[\"/data\"].value\n h5f.close()\n\n img = Image_Plot()\n img.set_style()\n img.subplots(1, 1)\n # img.axs[0][0].errorbar(result[r_lb], result[gt_lb], result[gt_lb + 1], c=\"C1\", capsize=4, label=\"T\", marker=\"s\")\n # img.axs[0][0].errorbar(result[r_lb], result[gx_lb], result[gx_lb + 1], c=\"C2\", capsize=4, label=\"X\", marker=\"s\")\n\n img.axs[0][0].errorbar(result[trans_dist_lb], result[crit_t_lb], result[crit_t_sig_lb], c=\"C1\", mfc=\"none\", marker=\"s\",\n capsize=4,fmt=\" \", label=\"T\")\n 
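    # NOTE: the X-component errorbar below passes result[crit_x_sig_lb + 1] as its
    # yerr. Given the row labels defined above (crit_x_sig_lb = 3, trans_dist_lb = 4),
    # that selects the mean-radius row rather than the X-component error, so
    # result[crit_x_sig_lb] was almost certainly intended; the same off-by-one
    # index appears in the "calculate" branch earlier in this script.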
img.axs[0][0].errorbar(result[trans_dist_lb], result[crit_x_lb], result[crit_x_sig_lb + 1], c=\"C2\", mfc=\"none\", marker=\"s\",\n capsize=4,fmt=\" \", label=\"X\")\n\n y_max = img.axs[0][0].set_ylim()[1]\n ylims = (0.1, 8000)\n\n # plot the line extracted from the paper\n\n w1_cfht_path = \"/home/hklee/work/CFHT/gg_lensing/dens_cluster/data.dat\"\n if os.path.exists(w1_cfht_path):\n dens_data = numpy.loadtxt(w1_cfht_path)\n img.axs[0][0].errorbar(dens_data[0], dens_data[1], dens_data[2:4], marker=\"s\",\n c=\"k\", capsize=4, mfc=\"none\",fmt=\" \", label=\"Dens cluster\")\n\n img.set_label(0, 0, 0, ylabels[1])\n img.set_label(0, 0, 1, xlabel)\n\n img.axs[0][0].set_yscale(\"log\")\n img.axs[0][0].set_ylim(ylims)\n img.axs[0][0].set_xscale(\"log\")\n xs = img.axs[0][0].set_xlim()\n # img.axs[0][0].plot([xs[0], xs[1]], [0, 0], linestyle=\"--\", linewidth=1, c=\"grey\")\n img.set_legend(0,0,loc=\"upper right\")\n\n # for j in range(10):\n # img.axs[0][0].plot([xs[0], xs[1]], [j, j], linewidth=0.7, c=\"grey\", alpha=0.6)\n # img.axs[0][0].plot([xs[0], xs[1]], [10 + 10*j, 10 + 10*j], linewidth=0.7,c=\"grey\", alpha=0.6)\n # img.axs[0][0].plot([xs[0], xs[1]], [100 + 100*j, 100 + 100*j], linewidth=0.7,c=\"grey\", alpha=0.6)\n #\n # img.axs[0][0].set_xlim(xs[0], xs[1])\n\n img.save_img(dens_pic_path + \".png\")\n img.set_style_default()\n img.close_img()\n\n # # plot R x \\Delta\\Sigma\n # img = Image_Plot()\n # img.set_style()\n # img.subplots(1,1)\n # img.set_label(0, 0, 0, ylabels_r)\n # img.set_label(0, 0, 1, xlabel)\n # img.axs[0][0].errorbar(result[r_lb], result[sigtxr_lb], result[sigtxr_lb + 1], c=\"C1\", capsize=4, label=\"X\", marker=\"s\")\n # img.axs[0][0].set_xscale(\"log\")\n # img.save_img(dens_r_pic_path + \".png\")\n # img.set_style_default()\n # img.close_img()\n\nprint(\"Images are saved in %s\"%dens_pic_path)\n\n","sub_path":"galaxy-galaxy lensing/GGL_calculation/CFHT/PSZ2LenS/ggl_plot_cfht.py","file_name":"ggl_plot_cfht.py","file_ext":"py","file_size_in_byte":10040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"226600663","text":"# tensorflow.keras\n# LSTM으로 모델링 (Dense 와 성능 비교)\n# 회귀모델\n\nimport numpy as np\n\n#1. DATA\nfrom tensorflow.keras.datasets import boston_housing\nfrom sklearn.model_selection import train_test_split\n\n# sklearn의 x와 y를 가져오는 방식이 다르다.\n(x_train, y_train), (x_test, y_test) = boston_housing.load_data()\nx_train, x_validation, y_train, y_validation = train_test_split(x_train, y_train, train_size = 0.9, shuffle = True, random_state=114)\n\nfrom sklearn.preprocessing import MinMaxScaler\nscaler = MinMaxScaler()\nscaler.fit(x_train)\nx_train = scaler.transform(x_train)\nx_test = scaler.transform(x_test)\nx_validation = scaler.transform(x_validation)\n\n# print(x_train.shape) # (323, 13)\n# print(x_test.shape) # (102, 13)\n# print(x_validation.shape) # (81, 13)\n\nx_train = x_train.reshape(x_train.shape[0],x_train.shape[1],1)\nx_test = x_test.reshape(x_test.shape[0],x_test.shape[1],1)\nx_validation = x_validation.reshape(x_validation.shape[0],x_validation.shape[1],1)\n\nprint(x_train.shape) # (323, 13, 1)\nprint(x_test.shape) # (102, 13, 1)\nprint(x_validation.shape) # (81, 13, 1)\n\n#2. 
Modeling\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, LSTM\n\nmodel = Sequential()\nmodel.add(LSTM(65, input_shape=(13,1), activation='relu'))\nmodel.add(Dense(65))\nmodel.add(Dense(26))\nmodel.add(Dense(13))\nmodel.add(Dense(13))\nmodel.add(Dense(1))\n\nmodel.summary()\n\n\n#3. Compile, Train\nmodel.compile(loss='mse',optimizer='adam',metrics=['mae'])\n\nfrom tensorflow.keras.callbacks import EarlyStopping\nealy_stopping = EarlyStopping(monitor='loss',patience=8,mode='min')\nmodel.fit(x_train, y_train, epochs=260, batch_size=13, validation_data=(x_validation, y_validation),verbose=1, callbacks=[ealy_stopping])\n\n#4. Evaluate, Predcit\nloss, mae = model.evaluate(x_test, y_test, batch_size=13)\nprint(\"loss : \", loss)\nprint(\"mae : \", mae)\n\ny_predict = model.predict(x_test)\n\n# RMSE\nfrom sklearn.metrics import mean_squared_error\ndef RMSE (y_test, y_predict) :\n return np.sqrt(mean_squared_error(y_test, y_predict))\nprint(\"RMSE : \", RMSE(y_test, y_predict))\n\n# R2\nfrom sklearn.metrics import r2_score\nr2 = r2_score(y_test, y_predict)\nprint(\"R2 : \", r2)\n\n\n# Dense\n# loss : 9.107584953308105\n# mae : 2.0973618030548096\n# RMSE : 3.017877734702501\n# R2 : 0.8905914829316571\n\n# LSTM\n# loss : 29.320236206054688\n# mae : 3.777653932571411\n# RMSE : 5.4148164106283945\n# R2 : 0.647778937637217","sub_path":"keras/keras33_LSTM1_boston2_keras.py","file_name":"keras33_LSTM1_boston2_keras.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"148659101","text":"# Function to wait for I2C lock to be given\n\nimport sys\nimport time\n\nfrom .smbx_logging import Logger\n\n\nDEBUG = \"debug\" in sys.argv\n\n\nif DEBUG:\n class I2C():\n \"\"\"Stand-in class for debugging I2C interface\"\"\"\n \n log = Logger(\"debug\")\n log.start()\n\n def __init__(self):\n self._have_lock = False\n self._lock_t = -1\n\n def try_lock(self):\n if self._lock_t == -1:\n self._lock_t = time.perf_counter()\n else:\n self._have_lock = time.perf_counter() - self._lock_t > 0.25\n return self._have_lock\n\n def writeto(self, address, data):\n self.log.write(f\"I2C {data} to {address:#x}\", \"i2c_debug.txt\")\n\n def unlock(self):\n pass\n\n def deinit(self):\n self.log.close()\n\n i2c = I2C()\n\nelse:\n # Set up I2C for ADC control\n import busio\n i2c = busio.I2C(3, 2) #SCL, SDA\n\n\n\"\"\"\nObtaining I2C lock grants sole access to the I2C bus and it is good practice to request a\nlock to ensure stability and predictability. 
The I2C bus can be run without obtaining a\nlock without issues assuming nothing else will try to touch the bus.\n\"\"\"\ndef waitForI2CBusLock(timeout=1.0):\n log = Logger(\"I2C\")\n log.start()\n\n log.write(\"Waiting for lock on I2C bus to be granted\", \"low_freq.txt\", True, end=\"\")\n t_start = time.time()\n while not i2c.try_lock():\n if time.time() - t_start > timeout:\n raise RuntimeError(\"Waiting for I2C lock timed out\")\n print(\".\", end='')\n time.sleep(0.1) # Don't hog the processor busywaiting\n print()\n log.write(\"I2C lock obtained\", \"low_freq.txt\", True)\n\n log.close()\n","sub_path":"simbox/processing/i2c_interface.py","file_name":"i2c_interface.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"198854838","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport logging\nimport logging.handlers\nimport mailbox\nfrom optparse import OptionParser\n\nfrom config import LOGFILE, TMPDIR\n\n\ndef move_msg(key, frombox, tobox):\n msg = frombox[key]\n\n if key in tobox:\n if key in frombox:\n frombox.lock()\n frombox.discard(key)\n frombox.flush()\n frombox.unlock()\n return False\n\n tobox.lock()\n tobox.add(msg)\n tobox.flush()\n tobox.unlock()\n\n frombox.lock()\n frombox.discard(key)\n frombox.flush()\n frombox.unlock()\n return True\n\ndef remove_msg(key, box):\n if not key in box:\n return False\n\n box.lock()\n box.discard(key)\n box.flush()\n box.unlock()\n return True\n\n\ndef setup_logging(file_level=logging.INFO,\n console_level=logging.ERROR):\n logger = logging.getLogger('')\n logger.setLevel(file_level)\n\n fh = logging.handlers.RotatingFileHandler(LOGFILE,\n maxBytes=1024*1000,\n backupCount=3)\n\n ch = logging.StreamHandler(sys.stderr)\n ch.setLevel(console_level)\n\n fmt = '%(asctime)s - %(module)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(fmt)\n\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n logger.addHandler(fh)\n logger.addHandler(ch)\n\ndef handle_pidfile(filename):\n _, filename = os.path.split(filename)\n path = os.path.join(TMPDIR, filename + '.pid')\n if os.path.exists(path):\n raise Exception('Global lock on %s currently acquired' % path)\n\n with open(path, 'w') as f:\n print >>f, os.getpid()\n\ndef finish_pidfile(filename):\n _, filename = os.path.split(filename)\n path = os.path.join(TMPDIR, filename + '.pid')\n if os.path.exists(path):\n os.remove(path)\n\n\nclass MHOptionParser(OptionParser):\n\n def parse_args(self):\n options, args = OptionParser.parse_args(self)\n if options.verbose:\n setup_logging(console_level=logging.DEBUG)\n else:\n setup_logging()\n\n return options, args\n\n\ndef get_optparser():\n p = MHOptionParser()\n p.add_option('-v', '--verbose', dest='verbose',\n action='store_true', default=False)\n return p\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"325422189","text":"import os\nfrom ftplib import FTP\n\nftp = FTP('localhost')\nftp.login(\"vdidenko\", \"vdidenko123\")\nftp.retrlines(\"LIST\")\n\n#ftp.cwd(\"D:\\TEMP\\---Q2-2018\")\n#ftp.cwd(\"D:\\TEMP\\---Q2-2018\") # or ftp.cwd(\"folderOne/subFolder\")\n\nlisting = []\nftp.retrlines(\"LIST\", listing.append)\nwords = listing[0].split(None, 8)\nfilename = words[-1].lstrip()\nfilenames = ftp.nlst()\nprint(filenames)\n\nfor filename in filenames:\n local_filename = 
os.path.join(r\"D:/ftplocal\", filename)\n lf = open(local_filename, \"wb\")\nftp.retrbinary(\"RETR \" + filename, lf.write, 8 * 1024)\nlf.close()\n\n\n","sub_path":"TEST.py","file_name":"TEST.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"141933737","text":"from __future__ import print_function\nimport datetime\nimport os\nimport time\nfrom collections import defaultdict\nfrom difflib import SequenceMatcher\nimport xlsxwriter\nfrom commons import bd_area_info\nfrom commons.utilities import Utilities\nfrom config import db_info\n\n\nclass DSStsPartialMapping:\n __district_list = bd_area_info.get_l21_district()\n __thana_list = bd_area_info.get_l21_thana()\n __local_sts_scanner_stripped_dict = defaultdict(list)\n __db = db_info.get_live_read_only__db()\n __local_db = db_info.get_local_db()\n __local_sts_data_list = defaultdict(list)\n __local_sts_duplicate_data_list = {}\n __result_data_list = [\n {'DSScModi': 'DS Modified', 'DSScOri': 'DS Original', 'stsScOri': 'STS Original', 'DSScName': 'DS SchoolName',\n 'stsScName': 'STS SchoolName', 'ratio': 'Match Ratio', 'stripped': 'STS Stripped Value'}]\n\n def __init__(self):\n pass\n\n def __get_non_modified_local_sts_data(self):\n collection = self.__local_db.sts_analysis\n for data in collection.find({'schoolCode.modified': None}):\n self.__local_sts_data_list[data['stripped']].append(\n {'scName': data['name']['original'], 'scOriginal': data['schoolCode']['original']})\n self.__local_sts_scanner_stripped_dict[data['scannerId']].append(data['stripped'])\n\n def __get_manual_similar_match(self, ds_sc_original, ds_scanner_id):\n ds_stripped_value = Utilities.get_stripped_digit(ds_sc_original)\n ratio = 0.0\n stripped_value = ''\n if ds_stripped_value:\n result_match = 0\n for sts_value in self.__local_sts_scanner_stripped_dict[ds_scanner_id]:\n temp_match = 0\n calculated_ratio = 0.0\n if len(ds_stripped_value) > len(sts_value):\n for index in range(0, len(sts_value)):\n if sts_value[index] == ds_stripped_value[index]:\n temp_match += 1\n calculated_ratio = temp_match/float(sts_value)\n elif len(ds_stripped_value) < len(sts_value):\n for index in range(0, len(ds_stripped_value)):\n if sts_value[index] == ds_stripped_value[index]:\n temp_match += 1\n calculated_ratio = temp_match/float(ds_stripped_value)\n if ratio < calculated_ratio:\n ratio = calculated_ratio\n stripped_value = sts_value\n if ratio * 100 > 80:\n return {'scannerId': ds_scanner_id, 'strippedValue': stripped_value, 'ratio': ratio}\n else:\n return {}\n\n def __get_similar_match(self, ds_sc_original, ds_scanner_id):\n ds_stripped_value = Utilities.get_stripped_digit(ds_sc_original)\n ratio = 0.0\n stripped_value = ''\n if ds_stripped_value:\n for sts_value in self.__local_sts_scanner_stripped_dict[ds_scanner_id]:\n calculated_ratio = SequenceMatcher(None, ds_stripped_value, sts_value).ratio()\n if ratio < calculated_ratio:\n ratio = calculated_ratio\n stripped_value = sts_value\n if ratio * 100 > 80:\n return {'scannerId': ds_scanner_id, 'strippedValue': stripped_value, 'ratio': ratio}\n else:\n return {}\n\n def search_ds_sheet(self):\n collection = self.__db.disbursement_sheets\n for ds_data in collection.find(\n {'district': {'$in': ['RANGPUR']}, 'upazilla': {'$in': ['PIRGONJ']}},\n {'_id': 0, 'schoolCode': 1, 'district': 1, 'upazilla': 1, 'status': 1, 'schoolName': 1,\n 'scannerId': 1}):\n if ds_data['status'] == 'PROCESSED':\n # temp = {'District': ds_data['district'], 
'Thana': ds_data['upazilla'],\n # 'SchoolCodeOriginal': ds_data['schoolCode']['value']['original'],\n # 'SchoolCodeModified': '',\n # 'SchoolName': ''}\n temp = {'DSScOri': ds_data['schoolCode']['value']['original'], 'DSScModi': '', 'DSScName': ''}\n else:\n temp = {'DSScOri': ds_data['schoolCode']['value']['original'],\n 'DSScModi': ds_data['schoolCode']['value']['modified'], 'DSScName': ds_data['schoolName']}\n\n # similar_match_dict = self.__get_similar_match(ds_data['schoolCode']['value']['original'],\n # ds_data['scannerId'])\n similar_match_dict = self.__get_manual_similar_match(ds_data['schoolCode']['value']['original'],\n ds_data['scannerId'])\n if similar_match_dict:\n # temp['stsScannerId'] = similar_match_dict['scannerId']\n temp['ratio'] = similar_match_dict['ratio']\n for sts_local_data in self.__local_sts_data_list[similar_match_dict['strippedValue']]:\n temp['stsScName'] = sts_local_data['scName']\n temp['stsScOri'] = sts_local_data['scOriginal']\n temp['stripped'] = similar_match_dict['strippedValue']\n self.__result_data_list.append(temp)\n\n def add_data_to_worksheet(self, worksheet):\n row = 0\n for data in self.__result_data_list:\n column = 0\n worksheet.write(row, column, data['DSScModi'])\n column += 1\n worksheet.write(row, column, data['DSScOri'])\n column += 1\n worksheet.write(row, column, data['stsScOri'])\n column += 1\n worksheet.write(row, column, data['DSScName'])\n column += 1\n worksheet.write(row, column, data['stsScName'])\n column += 1\n worksheet.write(row, column, data['ratio'])\n column += 1\n worksheet.write(row, column, data['stripped'])\n\n row += 1\n\n def create_excel(self):\n self.__get_non_modified_local_sts_data()\n self.search_ds_sheet()\n reportOutputDirectoryName = 'ds-sts-1-2-char-mismatch-mapping'\n if not os.path.exists(reportOutputDirectoryName):\n os.makedirs(reportOutputDirectoryName)\n file_name = reportOutputDirectoryName + \"/ds-stsMapping-Report\" + str(time.strftime(\"%d%m%Y\")) + str(\n time.strftime(\"%I%M%S\") + \".xlsx\")\n workbook = xlsxwriter.Workbook(file_name)\n worksheet = workbook.add_worksheet('DS-STS-MAPPING')\n self.add_data_to_worksheet(worksheet)\n workbook.close()\n print('Excel Created')\n\n\nstart = datetime.datetime.now()\nobj = DSStsPartialMapping()\nobj.create_excel()\nprint('elapsed-time: ' + str(datetime.datetime.now() - start))\n","sub_path":"ds_sts_mapping/ds_sts_1_2_chars_mismatch.py","file_name":"ds_sts_1_2_chars_mismatch.py","file_ext":"py","file_size_in_byte":6785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"585386901","text":"from django.conf.urls import url\nfrom finance.views import hello_world, charges, add_charge, get_charge, accounts, get_account, add_account, account\n\nurlpatterns = [\n\turl(r'^charges/$', charges),\n\turl(r'^accounts/$', accounts),\n\turl(r'^add_charge/(?P\\d+)$', add_charge),\n\turl(r'^get_charge/(?P\\d+)$', get_charge),\n\turl(r'^add_account$', add_account),\n\turl(r'^get_account/$', get_account),\n\turl(r'^accounts/get/(?P\\d+)/', account),\n url(r'^$', hello_world),\n]\n","sub_path":"finance/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"83257270","text":"import json\nimport numpy as np\nfrom statistics import mean, stdev, median, variance\nimport datetime\nfrom DatabaseModel import DatabaseModel\nfrom scipy.interpolate import interp1d\nfrom helpers.OperationHelper import 
OperationHelper\nfrom algorithms.KeyboardMappingModel import KeyboardMappingModel\nfrom collector.KeystrokeParser import KeystrokeParser\nfrom sklearn.metrics import mean_squared_error\n\n\nclass RecognitionModel(object):\n \"\"\"\n :logs_dir - users db created based on logs in directory\n :json_db_file - users db created based on json file\n \"\"\"\n def __init__(self, logs_dir=None, json_db_file=None):\n self.logs_dir = logs_dir if logs_dir else None\n self.json_db_file = json_db_file if json_db_file else None\n self.users_db = self.prepare_users_database()\n self.users_statistics = dict()\n self.users_interpolation = dict()\n\n def prepare_users_database(self):\n if self.logs_dir:\n database_model = DatabaseModel(logs_dir=self.logs_dir)\n return database_model.get_database()\n elif self.json_db_file:\n try:\n with open(self.json_db_file) as json_db_file:\n json_db = json_db_file.read()\n return json.loads(json_db)\n except IOError:\n print(\"File not found: \" + str(self.json_db_file))\n\n def calculate_statistics_for_users(self):\n for user in self.users_db.keys():\n self.users_statistics[user] = dict()\n for stat in self.users_db[user].keys():\n self.users_statistics[user][stat] = dict()\n for element in self.users_db[user][stat]:\n if len(self.users_db[user][stat][element]) >= 2:\n self.users_statistics[user][stat][element] = list(map(float,self.users_db[user][stat][element]))\n _mean = mean(self.users_statistics[user][stat][element])\n _median = median(self.users_statistics[user][stat][element])\n _variance = variance(self.users_statistics[user][stat][element])\n _stddev = stdev(self.users_statistics[user][stat][element])\n self.users_statistics[user][stat][element] = [str(_mean), str(_median), str(_stddev), str(_variance),\n [str(_mean - _stddev), str(_mean + _stddev)],\n [str(_median - _stddev), str(_median + _stddev)],\n OperationHelper.calculate_confidence_interval(\n self.users_statistics[user][stat][element])]\n\n def calculate_interpolation(self):\n for user in self.users_db.keys():\n self.users_interpolation[user] = dict()\n for stat in self.users_db[user].keys():\n self.users_interpolation[user].update({stat: {}})\n for element in self.users_db[user][stat]:\n if len(self.users_db[user][stat][element]) > 3:\n if element not in self.users_interpolation[user][stat]:\n self.users_interpolation[user][stat][element] = []\n if element in self.users_interpolation[user][stat]:\n samples = np.linspace(0, len(self.users_db[user][stat][element]) - 1,\n num=len(self.users_db[user][stat][element]),\n endpoint=True)\n interpol_fun_linear = interp1d(samples,\n list(map(float, self.users_db[user][stat][element])),\n kind='linear')\n interpol_fun_cubic = interp1d(samples,\n list(map(float, self.users_db[user][stat][element])),\n kind='cubic')\n xnew = np.linspace(0, len(self.users_db[user][stat][element]) - 1, num=100, endpoint=True)\n self.users_interpolation[user][stat][element] = list(map(str,interpol_fun_cubic(xnew)))\n\n def get_db_stats(self):\n return self.users_statistics\n\n def get_user_stats(self, user, statistics=False):\n if statistics is True:\n return self.users_statistics[user]\n else:\n return self.users_db[user]\n\n def save_stats(self, filename):\n with open(\"./\" + str(filename) +\n str(datetime.datetime.now().strftime(\"_%Y-%m-%d_%H%M%S\") + \"_stats.json\"), \"w\") as jsonDB:\n jsonDB.write(str(self.users_statistics).replace(\"\\'\", \"\\\"\"))\n print(\"Statistics for user to json file\")\n\n def save_interpolation(self, filename):\n with open(\"./\" + str(filename) 
+\n str(datetime.datetime.now().strftime(\"_%Y-%m-%d_%H%M%S\") + \"_interpolation.json\"), \"w\") as jsonDB:\n jsonDB.write(str(self.users_interpolation).replace(\"\\'\", \"\\\"\"))\n print(\"Interpolation data for user to json file\")\n\n def map_keyboard(self, user_data):\n keyboard_mapping_model = KeyboardMappingModel(user_data=user_data)\n keyboard_mapping_model.calculate_cross_combination()\n moves = keyboard_mapping_model.calculate_moves_estimation()\n keyboard_mapping_model.update_user_data(moves)\n print(keyboard_mapping_model.check_biograms())\n keyboard_map = keyboard_mapping_model.inter_zones_moves_analysis()\n keyboard_mapping_model.update_user_data(keyboard_map)\n return keyboard_mapping_model.user_data\n\n def check_keys_match(self, user_data, parameter):\n # check % which is matched to db data\n matched_result = {user: {} for user in self.users_db.keys()}\n for user in self.users_db.keys():\n matched_sum = 0\n letter_counter = 0\n for letter in user_data.data[parameter]:\n if letter in self.users_statistics[user][parameter] and \\\n letter in user_data.user_statistics[parameter]:\n letter_counter = letter_counter + 1\n if self.users_statistics[user][parameter][letter][4][0] <= \\\n user_data.user_statistics[parameter][letter][0] <= \\\n self.users_statistics[user][parameter][letter][4][1]:\n matched_sum = matched_sum + 1\n if self.users_statistics[user][parameter][letter][5][0] <= \\\n user_data.user_statistics[parameter][letter][1] <= \\\n self.users_statistics[user][parameter][letter][5][1]:\n matched_sum = matched_sum + 1\n # if self.users_statistics[user][parameter][letter][6][0] <= \\\n # user_data.user_statistics[parameter][letter][1] <= \\\n # self.users_statistics[user][parameter][letter][6][1]:\n # matched_sum = matched_sum + 1\n if letter_counter > 0:\n matched_result[user].update({letter: float(matched_sum)/float(2*letter_counter)})\n return matched_result\n\n def calculate_user_fit(self, user_data, parameter):\n matched_result = self.check_keys_match(user_data=user_data, parameter=parameter)\n fit_result = {}\n for user in matched_result.keys():\n fit_sum = 0\n for letter in matched_result[user].keys():\n fit_sum = fit_sum + matched_result[user][letter]\n fit_result.update({user: fit_sum / len(matched_result[user])})\n return fit_result\n\n def check_keystroke_match(self, user_data):\n matched_result = {user: None for user in self.users_db.keys()}\n for user in self.users_statistics.keys():\n if \"rate [keys/min]\" in self.users_statistics[user][\"general_keystroke_info\"]:\n if self.users_statistics[user][\"general_keystroke_info\"][\"rate [keys/min]\"][4][0] <= \\\n user_data.user_statistics[\"general_keystroke_info\"][\"rate [keys/min]\"][0] <= \\\n self.users_statistics[user][\"general_keystroke_info\"][\"rate [keys/min]\"][4][1]:\n matched_result.update({\n user: [1, float(user_data.user_statistics[\"general_keystroke_info\"][\"rate [keys/min]\"][0]) -\n float(self.users_statistics[user][\"general_keystroke_info\"][\"rate [keys/min]\"][0])]})\n else:\n matched_result.update(\n {user: [0, float(user_data.user_statistics[\"general_keystroke_info\"][\"rate [keys/min]\"][0]) -\n float(self.users_statistics[user][\"general_keystroke_info\"][\"rate [keys/min]\"][0])]})\n return matched_result\n\n def calculate_mean_square_error(self, user_data, parameter):\n matched_result = {user: {} for user in self.users_db.keys()}\n for user in self.users_db.keys():\n for letter in user_data.data[parameter]:\n if letter in self.users_db[user][parameter] and \\\n letter in 
user_data.data[parameter]:\n if len(user_data.data[parameter][letter]) >= 1 and len(self.users_db[user][parameter][letter]) >= 1:\n if len(user_data.data[parameter][letter]) <= len(self.users_db[user][parameter][letter]):\n from_db = self.users_db[user][parameter][letter][0:len(user_data.data[parameter][letter])]\n matched_result[user].update(\n {letter: mean_squared_error(list(map(float, from_db)),\n list(map(float, user_data.data[parameter][letter])))})\n matched_result = {user: sum(matched_result[user].values())/len(matched_result[user].values())\n for user in matched_result.keys()}\n return matched_result\n\n def predict(self, user_data):\n result = {'time_stats': dict(),\n 'mean_square_error': dict(),\n 'keystroke': dict()}\n user_data.calculate_statistics_for_user()\n result['time_stats'].update({'1': self.calculate_user_fit(user_data=user_data, parameter=\"time_pressed\")})\n result['time_stats'].update({'2': self.calculate_user_fit(user_data=user_data, parameter=\"time_between_keys\")})\n result['time_stats'].update({'3': self.calculate_user_fit(user_data=user_data,\n parameter=\"time_between_keys_down_down\")})\n result['keystroke'].update({'1': self.check_keystroke_match(user_data=user_data)})\n\n result['mean_square_error'].update(\n {'1': self.calculate_mean_square_error(user_data=user_data, parameter=\"time_pressed\")})\n result['mean_square_error'].update(\n {'2': self.calculate_mean_square_error(user_data=user_data, parameter=\"time_between_keys\")})\n result['mean_square_error'].update(\n {'3': self.calculate_mean_square_error(user_data=user_data, parameter=\"time_between_keys_down_down\")})\n print(result)\n return result\n\n# rm = RecognitionModel(json_db_file=\"./testDB_2019-03-21_185244_merged.json\")\n# #result = rm.calculate_user_fit(user_data=rm.get_user_stats(user=\"kamil\")['time_between_keys'])\n# result = rm.calculate_mean_square_error(user_data=rm.get_user_stats(user=\"kamil\"), parameter ='time_between_keys')\n# print(\"\\n\")\n# print(result)\n# rm = RecognitionModel(json_db_file=\"./testDB_merged_2019-05-04_154219_merged.json\")\n# rm.check_keystroke_match(user_data=\"kamil\")\n# rm.map_keyboard(user_data=rm.get_user_stats(user=\"jurek\")['time_between_keys'])\n# rm.calculate_interpolation()\n# rm.save_interpolation(filename=\"db_interp\")\n# rm.save_stats(filename=\"stats\")\n# rm = RecognitionModel(logs_dir=\"./time_logs\")\n\n","sub_path":"RecognitionModel.py","file_name":"RecognitionModel.py","file_ext":"py","file_size_in_byte":12212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"550519517","text":"import json\nimport pymongo\nimport flask\nfrom flask import Flask, request\nimport socketio\nimport apscheduler.schedulers.background\n\napp = Flask(__name__)\nsio = socketio.Server()\napp.wsgi_app = socketio.WSGIApp(sio, app.wsgi_app)\n\navail_rider = []\navail_driver = []\n\n\ndef get_distance(p1, p2):\n temp = pow((p1[0] - p2[0]), 2) + pow((p1[1] - p2[1]), 2)\n return pow(temp, 0.5)\n\n\ndef client_match():\n if not avail_driver:\n return\n for r in avail_rider:\n rider = json.loads(r)\n mini = 50000\n sel_driver = None\n sel_driverF = None\n for d in avail_driver:\n driver = json.loads(d)\n if get_distance(rider[\"loc\"], driver[\"loc\"]) < mini:\n sel_driverF = d\n sel_driver = driver\n\n fare = get_distance(rider['loc'], rider['des']) * 2\n\n notification = {'r_name': rider['name'], 'd_name': sel_driver['name'], 'fare': fare}\n # print(notification)\n print(\"Server has paired rider %s with driver 
%s\" % (rider['name'], sel_driver['name']))\n sio.emit(\"notify\", notification)\n\n avail_rider.remove(r)\n avail_driver.remove(sel_driverF)\n\n\nschedule = apscheduler.schedulers.background.BackgroundScheduler()\nschedule.add_job(func=client_match, trigger=\"interval\", seconds=5)\nschedule.start()\n\n\n@app.route(\"/rider\", methods=[\"GET\", \"POST\"])\ndef rider_update():\n data = request.json\n avail_rider.append(data)\n return flask.Response(status=201)\n\n\n@app.route(\"/driver\", methods=[\"GET\", \"POST\"])\ndef driver_update():\n data = request.json\n avail_driver.append(data)\n return flask.Response(status=201)\n\n\n@app.route(\"/rate\", methods=[\"GET\", \"POST\"])\ndef rating():\n data = request.json\n myclient = pymongo.MongoClient(\"mongodb://127.0.0.1:27017/\")\n mydb = myclient[\"gorib_uberdb\"]\n mycol = mydb[\"ratings\"]\n mydict = json.loads(data)\n print(\"rider %s gave rating %d to driver %s\" % (mydict['rname'], mydict['rate'], mydict['dname']))\n x = mycol.insert_one(mydict)\n\n return flask.Response(status=201)\n\n\nif __name__ == \"__main__\":\n # sio.run(app, debug=True, port=5000, use_reloader=False)\n app.run(host=\"127.0.0.1\", port=5000)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"486138864","text":"import random\nfrom jgomas.CService import CService\n\n\nclass CRegistry:\n\n MAX_TOTAL_SERVICES = 100\n\n def __init__(self):\n self.m_ServiceTypes = list()\n self.m_ServiceList = list()\n\n def RegisterService(self, _sServiceType, _bKeyCode=True):\n\n for i in self.m_ServiceList:\n if _sServiceType in i.m_sDFType:\n print(\"Service registered earlier: \" + _sServiceType)\n return i\n\n # If we are here, we haven't found any match\n Service = CService()\n\n sKeyName = \"\"\n sKeyType = \"\"\n\n if _bKeyCode:\n sKeyName = str(random.randint(0, 9999))\n sKeyType = str(random.randint(0, 9999))\n\n Service.m_sDFName = _sServiceType + sKeyName\n Service.m_sDFType = _sServiceType + sKeyType\n\n self.m_ServiceList.append(Service)\n self.m_ServiceTypes.append(_sServiceType)\n print(\"Registry - Service Registered: \" + _sServiceType)\n\n return Service\n","sub_path":"jgomas/CRegistry.py","file_name":"CRegistry.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"418527790","text":"import pandas as pd\n\n# define the source excel file's path\nexcelFile = 'test.xlsx'\n# use pandas to read excel file\nsourceData = pd.read_excel(excelFile)\n# change column name into a list\ncolumnName = list(sourceData.columns) \n\n# print all the list name\ni = 1\nwhile i < (len(columnName)+1):\n print (\"列号:%s 列名:%s\" %(i,columnName[i-1]))\n i+=1\n\n# get user's choice (number) and print the name \nmatchColumnNumber = int(float(input(\"请输入作为匹配的列号:\")))\nmatchColumnNumber-=1 \nprint (columnName[matchColumnNumber])\nfillColumnNumber = int(float(input(\"请输入需要填充的列号:\")))\nfillColumnNumber-=1\nprint (columnName[fillColumnNumber])\n\n# print a cut-off line\n#print (\"\\n\\n================================\\n\\n================================\\n\\n\")\n\n# use isnull to judge the null data in sourcedata\nNaNdf = sourceData.isnull()\n\ni = 0\nwhile i m:\n\t\tprint(\"Invalid!\")\n\telse:\n\t\tfor i in range(1,m+1):\n\t\t\tif i%n == 0:\n\t\t\t\tprint(i)\n\tprint()","sub_path":"Basic Programming/Dimik oj/Python3/প্রোগ্রামিং সমস্যা 32 - [৫২ সমস্যা বই] X এর 
গুণিতক.py","file_name":"প্রোগ্রামিং সমস্যা 32 - [৫২ সমস্যা বই] X এর গুণিতক.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"574703606","text":"from __future__ import division\r\nimport os\r\nimport re\r\nimport numpy as np\r\nfrom difflib import SequenceMatcher\r\nimport pdftotext\r\n\r\ndef removeHeaderAndFooter(pdf):\r\n\r\n fullPdf = []\r\n for i in range(len(pdf)):\r\n if (pdf[i].strip() != ''):\r\n fullPdf.append(pdf[i].split('\\n'))\r\n if (len(fullPdf) == 1):\r\n return fullPdf\r\n # Remove header\r\n row = 0\r\n continueRemove = True\r\n while (True):\r\n for i in range(len(fullPdf) - 1):\r\n if SequenceMatcher(None, ''.join(fullPdf[0][row].split()), ''.join(fullPdf[i+1][row].split())).ratio() < 0.8:\r\n continueRemove = False\r\n break\r\n if (continueRemove):\r\n for i in range(len(fullPdf)):\r\n del(fullPdf[i][row])\r\n else:\r\n break\r\n\r\n # Remove footer\r\n continueRemove = True\r\n while (True):\r\n row = [len(page)-1 for page in fullPdf]\r\n for i in range(len(fullPdf) - 1):\r\n if SequenceMatcher(None, ''.join(fullPdf[0][row[0]].split()), ''.join(fullPdf[i+1][row[i+1]].split())).ratio() < 0.8:\r\n continueRemove = False\r\n break\r\n if (continueRemove):\r\n for i in range(len(fullPdf)):\r\n del(fullPdf[i][row[i]])\r\n else:\r\n break\r\n return fullPdf;\r\n\r\ndef preProcessPdf(filename):\r\n # for filename in file:\r\n # Covert PDF to string by page\r\n # print(filename)\r\n\r\n with open(filename, \"rb\") as f:\r\n pdf = pdftotext.PDF(f)\r\n # Remove header & footer\r\n # print(len(pdf))\r\n if (len(pdf) > 1):\r\n fullPdf = removeHeaderAndFooter(pdf)\r\n # Join PDF\r\n for i in range(len(pdf)):\r\n with open(filename[:-4] + '_' + str(i) + \".txt\", \"w+\") as f:\r\n for line in fullPdf[i]:\r\n f.write(line + '\\n')\r\n fullPdf = [line for page in fullPdf for line in page]\r\n else:\r\n fullPdf = pdf[0].split('\\n')\r\n return fullPdf\r\n\r\nif __name__ == '__main__':\r\n file = os.listdir()\r\n file = list(filter(lambda ef: ef[0] != \".\" and ef[-3:] == \"pdf\", file))\r\n # file = [\"SBL_FDS_FDSLSGN190223OS.190219164902.pdf\"]\r\n for filename in file:\r\n fullPdf = preProcessPdf(filename)\r\n\r\n if (fullPdf[0] != \"\"):\r\n with open(filename[:-3]+\"txt\", \"w+\") as f:\r\n for line in fullPdf:\r\n f.write(line + '\\n')\r\n","sub_path":"backup/demo0.py","file_name":"demo0.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"248357647","text":"\"\"\"\ndlibとopenCV使って顔器官検出\nhttps://qiita.com/kekeho/items/0b2d4ed5192a4c90a0ac\n\"\"\"\n\nimport cv2\nimport dlib\nimport numpy as np\nimport os\n\n# Cascade files directory path\nCASCADE_PATH = os.path.dirname(os.path.abspath(__file__)) + \"/haarcascades/\"\npredictor = dlib.shape_predictor(\n os.path.dirname(os.path.abspath(__file__))+\"/shape_predictor_68_face_landmarks.dat\")\nface_cascade = cv2.CascadeClassifier(\n CASCADE_PATH + 'haarcascade_frontalface_default.xml')\n\n\ndef face_position(gray_img):\n \"\"\"Detect faces position\n Return:\n faces: faces position list (x, y, w, h)\n \"\"\"\n faces = face_cascade.detectMultiScale(gray_img, minSize=(100, 100))\n return faces\n\n\ndef facemark(gray_img):\n faces_roi = face_position(gray_img)\n landmarks = []\n for face in faces_roi:\n x, y, w, h = face\n face_img = gray_img[y: y + h, x: x + w];\n detector = dlib.get_frontal_face_detector()\n rects = detector(gray_img, 1)\n 
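        # NOTE: the dlib frontal-face detector is re-created and run on the full
        # gray image for every Haar ROI, and `landmarks` is re-initialized below
        # on each pass, so only the detections from the last iteration survive to
        # the return; the cropped `face_img` computed above is never actually used.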
landmarks = []\n for rect in rects:\n landmarks.append(np.array([[p.x, p.y] for p in predictor(gray_img, rect).parts()]))\n return landmarks\n\ndef main(image_path, output_path):\n img = cv2.imread(image_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n landmarks = facemark(gray)\n\n os.makedirs(f\".{output_path.split('.')[1]}/\", exist_ok=True)\n color = (255,0,0)\n\n for i, landmark in enumerate(landmarks):\n #新しい配列に入力画像の一部を代入\n dst = img[landmark[29][1]:landmark[32][1], landmark[17][0]:landmark[40][0]]\n #書き出し\n cv2.imwrite(f\".{output_path.split('.')[1]}/{i+1}_left_cheek.jpg\", dst)\n\n dst = img[landmark[29][1]:landmark[34][1], landmark[43][0]:landmark[26][0]]\n cv2.imwrite(f\".{output_path.split('.')[1]}/{i+1}_right_cheek.jpg\",dst)\n\n dst = img[(landmark[19][1] - (landmark[37][1]-landmark[19][1])) : landmark[24][1], landmark[19][0] : landmark[24][0]]\n cv2.imwrite(f\".{output_path.split('.')[1]}/{i+1}_amount.jpg\",dst)\n\n cv2.rectangle(img, (landmark[17][0], landmark[29][1]), (landmark[40][0], landmark[32][1]), color, thickness=2)\n cv2.rectangle(img, (landmark[43][0], landmark[29][1]), (landmark[26][0], landmark[34][1]), color, thickness=2)\n cv2.rectangle(img, (landmark[19][0], landmark[19][1] - (landmark[37][1]-landmark[19][1])), (landmark[24][0], landmark[24][1]), color, thickness=2)\n cv2.imwrite(output_path, img)\n\n\nif __name__ == '__main__':\n main(\"./inputs/test.jpg\", \"./outputs/test.jpg\")","sub_path":"detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"202223942","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# 你是一个专业的小偷,计划偷窃沿街的房屋。每间房内都藏有一定的现金,影响你偷窃的唯一制约因素就是相邻的房屋装有相互连通的防盗系统,如果两间相邻的房屋在同一晚上\n# 被小偷闯入,系统会自动报警。\n#\n# 给定一个代表每个房屋存放金额的非负整数数组,计算你 不触动警报装置的情况下 ,一夜之内能够偷窃到的最高金额。\n#\n#\n#\n# 示例 1:\n#\n#\n# 输入:[1,2,3,1]\n# 输出:4\n# 解释:偷窃 1 号房屋 (金额 = 1) ,然后偷窃 3 号房屋 (金额 = 3)。\n# 偷窃到的最高金额 = 1 + 3 = 4 。\n#\n# 示例 2:\n#\n#\n# 输入:[2,7,9,3,1]\n# 输出:12\n# 解释:偷窃 1 号房屋 (金额 = 2), 偷窃 3 号房屋 (金额 = 9),接着偷窃 5 号房屋 (金额 = 1)。\n# 偷窃到的最高金额 = 2 + 9 + 1 = 12 。\n#\n#\n#\n#\n# 提示:\n#\n#\n# 1 <= nums.length <= 100\n# 0 <= nums[i] <= 400\n#\n# Related Topics 数组 动态规划 👍 1631 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n\n\nfrom typing import List\n\n\nclass Solution:\n def rob(self, nums: List[int]) -> int:\n # 状态 到第I 位置,的最大金额\n size = len(nums)\n\n dp = [0] * (size + 1)\n dp[1] = nums[0]\n\n for i in range(2, size + 1):\n dp[i] = max(dp[i - 1], dp[i - 2] + nums[i - 1])\n\n return dp[-1]\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"算法分析和归类/动态规划/打家劫舍.py","file_name":"打家劫舍.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"488124193","text":"#!/usr/bin/env python\n\n\"\"\"transcribe audio segments\n\nThis script has been thrown together after a few days research to convert audio\nfiles (wavs) containing speech, and transcribe them using the offline\nspeech-to-text engine PocketSphinx. 
It uses auditok to identify the regions of\ninterest.\n\nIt uses the `SpeechRecognition` package to perform the speech transcribing so\nit could be easily extended to use a more accurate engine.\n\nJSON output with a breakdown of each audio segment is returned to STDOUT with\nenough metadata to be useful and sentiment analysis from `textblob`.\n\n\"\"\"\n\nimport json\nimport speech_recognition as sr\nimport sys\nfrom auditok import ADSFactory, AudioEnergyValidator, StreamTokenizer\nfrom backports import tempfile\nfrom pathlib import Path\nfrom pydub import AudioSegment\nfrom six.moves import urllib\nfrom textblob import TextBlob\n\ndef transcribe_audio(file, unpackdir):\n path = Path(file)\n\n tempsound = AudioSegment.from_wav(file)\n tempsound = tempsound.set_channels(1)\n tmpfile = \"{}/0wavtmp_{}\".format(unpackdir, path.name)\n tempsound.export(tmpfile, format=\"wav\")\n\n # We set the `record` argument to True so that we can rewind the source\n asource = ADSFactory.ads(filename=tmpfile, record=True)\n\n validator = AudioEnergyValidator(sample_width=asource.get_sample_width(), energy_threshold=50)\n\n # Default analysis window is 10 ms (float(asource.get_block_size()) / asource.get_sampling_rate())\n # min_length=20 : minimum length of a valid audio activity\n # max_length=500 : maximum length of a valid audio activity\n # max_continuous_silence=30 : maximum length of a tolerated silence within valid audio activity\n tokenizer = StreamTokenizer(validator=validator, min_length=20, max_length=500, max_continuous_silence=30)\n\n asource.open()\n tokens = tokenizer.tokenize(asource)\n r = sr.Recognizer()\n\n json_output = {\n \"segments\": []\n }\n\n for index,t in enumerate(tokens):\n # print(\"Token starts at {0} and ends at {1}\".format(t[1] * 10, t[2] * 10))\n newAudio = AudioSegment.from_wav(file)\n newAudio = newAudio[t[1] * 10:t[2] * 10]\n\n chunk_name = \"{}/{}_clip{}.wav\".format(unpackdir, path.stem, index)\n # print(\"Generating\", chunk_name)\n newAudio.export(chunk_name, format=\"wav\")\n with sr.AudioFile(chunk_name) as source:\n audio = r.record(source)\n\n transcription = {\n \"start\": t[1] * 10,\n \"end\": t[2] * 10\n }\n\n # recognize speech using Sphinx\n try:\n transcription[\"text\"] = r.recognize_sphinx(audio)\n except sr.UnknownValueError:\n transcription[\"text\"] = \"\"\n transcription[\"error\"] = True\n sys.stderr.write(\"Sphinx could not understand audio\")\n except sr.RequestError as e:\n sys.stderr.write(\"Sphinx error; {0}\".format(e))\n\n tb = TextBlob(transcription[\"text\"])\n transcription[\"sentiment\"] = {\n \"polarity\": tb.polarity,\n \"subjectivity\": tb.subjectivity\n }\n\n json_output[\"segments\"].append(transcription)\n\n # json_output['segments'].append({\n # \"start\": t[1] * 10,\n # \"end\": t[2] * 10,\n # \"transcription\": transcription,\n # \"transcription_error\": transcription_error,\n # \"sentiment\": {\n # \"polarity\": tb.polarity,\n # \"subjectivity\": tb.subjectivity\n # }\n # })\n\n return json_output\n\nif len(sys.argv) < 2:\n print(\"You failed to provide a filename to tokenize\")\n sys.exit(1)\n\n# uri handles different schemes (http, file etc.)\nuri = sys.argv[1]\n\n# tempdir is cleaned up after the program exits\ntempdir = tempfile.TemporaryDirectory()\n\nfilename = \"{}/audio.wav\".format(tempdir.name)\nurllib.request.urlretrieve(uri, filename)\n\n# outputs\ntranscription = transcribe_audio(filename, 
tempdir.name)\nprint(json.dumps(transcription))\n","sub_path":"tokenize_and_transcribe.py","file_name":"tokenize_and_transcribe.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"638752850","text":"from mlm.scorers import MLMScorer, MLMScorerPT, LMScorer\nfrom mlm.models import get_pretrained\nimport mxnet as mx\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\nimport numpy as np\n\nctxs = [mx.cpu()] # or, e.g., [mx.gpu(0), mx.gpu(1)]\n\nsentence = 'confirms HTTPURL via @USER :cry:'\n\nprint('Checking original MLM library..')\n# MXNet MLMs (use names from mlm.models.SUPPORTED_MLMS)\nmodel, vocab, tokenizer = get_pretrained(ctxs, 'bert-base-en-cased')\n\n#print(type(vocab).__name__)\nscorer = MLMScorer(model, vocab, tokenizer, ctxs)\nprint(scorer.score_sentences([sentence]))\n# >> [-12.410664200782776]\nprint(scorer.score_sentences([sentence], per_token=True))\n# >> [[None, -6.126736640930176, -5.501412391662598, -0.7825151681900024, None]]\n\nprint('Done. Checking extension..')\n# Load the AutoTokenizer with a normalization mode if the input Tweet is raw\ntokenizer = AutoTokenizer.from_pretrained(\"vinai/bertweet-base\", normalization=True)\n\nbertweet, vocab, tokenizer = get_pretrained(ctxs, 'vinai/bertweet-base-en-cased')\n\n#print(BERTVocab(tokenizer.vocab_file))\n\ntweetscorer = MLMScorerPT(bertweet, None, tokenizer, ctxs)\n\nprint(tweetscorer.score_sentences([sentence]))\n\nprint(tweetscorer.score_sentences([sentence], per_token=True))\n\nprint('Done.')\n","sub_path":"src/bertweet_likelihood_example.py","file_name":"bertweet_likelihood_example.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"614385104","text":"import requests\nfrom lxml import etree\nimport time\nfrom ProxyDatabase.dbclient import DBClient\nimport sys\nimport os\n\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\n\nclass JiangXianLiClient(object):\n\n def __init__(self,pages=24):\n self.headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Connection': 'keep-alive',\n 'Host': 'ip.jiangxianli.com',\n 'Referer': 'https://ip.jiangxianli.com/?page=2',\n 'Sec-Fetch-Mode': 'navigate',\n 'Sec-Fetch-Site': 'same-origin',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'\n }\n self.pages = pages\n self.db = DBClient()\n for page in range(1,self.pages+1):\n self.fetch_proxy(page)\n time.sleep(5)\n\n def fetch_proxy(self,page):\n url = 'https://ip.jiangxianli.com/?page={page}'.format(page=str(page))\n self.headers['Referer'] = url\n response = requests.get(url,headers=self.headers)\n text = response.text\n html = etree.HTML(text)\n trs = html.xpath('//table[@class=\"layui-table\"]/tbody/tr')\n for tr in trs:\n ip = tr.xpath('./td[1]/text()')[0].replace('\\t','').replace('\\n','')\n port = tr.xpath('./td[2]/text()')[0].replace('\\t','').replace('\\n','')\n self.db.run(self.db.add(ip+':'+port))","sub_path":"ProxyGetter/jiangxianliclient.py","file_name":"jiangxianliclient.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"205336030","text":"import os\nimport argparse\nimport pathlib\nimport glob\nimport re\n# pdfminer(pip install pdfminer3k)\nfrom pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfparser import PDFDocument\n# olefile(pip install olefile)\nimport olefile\nimport win32com.client\n\n##################################\n# 関数\n##################################\n# pdfファイル\ndef cnvPdf(srcPath, dstPath):\n # fp = open(srcPath, 'wb')\n # parser = PDFParser(fp)\n # doc = PDFDocument(parser)\n # parser.set_document(doc)\n # doc.set_parser(parser)\n # print(doc.info[0][\"Category\"])\n # print(doc.info[0][\"Keywords\"])\n return True\n# wordファイル\ndef cnvWord(srcPath, dstPath):\n # doc = win32com.client.gencache.EnsureDispatch(\"Word.Application\")\n # doc.Visible = False # アプリで開かない\n # doc.DisplayAlerts = False # 警告OFF\n # try:\n # doc_file = doc.Documents.Open(srcPath, False, True) # 変換ダイアログ非表示、読み取り専用で開く\n # for s in doc_file.Sentences:\n # pass\n # doc_file.Close()\n # finally:\n # doc.Quit()\n return True\ndef cnvExcel(srcPath, dstPath):\n return True\ndef cnvPPoint(srcPath, dstPath):\n return True\ndef cnvText(srcPath, dstPath):\n return True\ndef cnvOther(srcPath, dstPath):\n return True\ndef isOutlookData(suffix):\n if suffix == \".pst\":\n return True\n else:\n return False\ndef isExecutable(suffix):\n if suffix == \".bat\" or \\\n suffix == \".cmd\" or \\\n suffix == \".com\" or \\\n suffix == \".cpl\" or \\\n suffix == \".desklink\" or \\\n suffix == \".exe\" or \\\n suffix == \".hta\" or \\\n suffix == \".lnk\" or \\\n suffix == \".mapmai\" or \\\n suffix == \".pif\" or \\\n suffix == \".scr\" or \\\n suffix == \".shs\" or \\\n suffix == \".url\" or \\\n suffix == \".vbs\":\n return True\n else:\n return False\ndef cnvFile(srcPath, dstDir, dstPath, suffix):\n if os.path.exists(dstDir) == False:\n os.makedirs(dstDir)\n if suffix == \".pdf\":\n # PDFファイル\n pass\n if suffix == \".xls\" or suffix == \".xlsx\":\n # Excelファイル\n ret = cnvWord(srcPath, dstPath)\n elif suffix == \".doc\" or suffix == \".docx\":\n # Wordファイル\n ret = cnvWord(srcPath, dstPath)\n elif suffix == \".ppt\" or suffix == \".pptx\":\n # PowerPointファイル\n ret = cnvPPoint(srcPath, dstPath)\n elif suffix == \".txt\":\n # テキストファイル\n ret = cnvText(srcPath, dstPath)\n elif isOutlookData(suffix):\n # アウトルックデータファイル\n ret = False\n elif isExecutable(suffix):\n # 実行可能ファイル\n ret = False\n else:\n # その他のファイル\n ret = cnvOther(srcPath, dstPath)\n return ret\ndef moveFile(srcPath, dstPath):\n return True\n\n##################################\n# メイン処理\n##################################\ndef main():\n# 引数チェック\n parser = argparse.ArgumentParser()\n parser.add_argument(\"src_dir\")\n parser.add_argument(\"dst_dir\")\n parser.add_argument(\"bak_dir\")\n args = parser.parse_args()\n src_dir = args.src_dir\n dst_dir = args.dst_dir\n bak_dir = args.bak_dir\n pathItr = pathlib.Path(args.src_dir).resolve()\n for p in ([p for p in pathItr.glob('**/*') if p.is_file()]):\n # 移行元\n srcDir = str(p.parent)\n srcPath = str(p)\n # 移行先\n dstDir = srcDir.replace(src_dir, dst_dir)\n dstPath = srcPath.replace(src_dir, dst_dir)\n suffix = p.suffix\n # 移行\n print(\"{}\".format(srcPath))\n if cnvFile(srcPath, dstDir, dstPath, suffix):\n # 移行が成功したら、ファイルを移動\n moveFile(src_dir, bak_dir)\nif __name__ == \"__main__\":\n main()\n","sub_path":"ConvertToBox/ConvertToBox.py","file_name":"ConvertToBox.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
+{"seq_id":"140478460","text":"# Copyright 2015 ETH Zurich\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\n:mod:`types` --- SCION types\n============================\n\nFor all type classes that are used in multiple parts of the infrastructure.\n\"\"\"\n\n\nclass TypeBase(object): # pragma: no cover\n @classmethod\n def to_str(cls, type_, error=False):\n for attr in dir(cls):\n if getattr(cls, attr) == type_:\n return attr\n if not error:\n return \"UNKNOWN (%s)\" % type_\n raise IndexError\n\n\n############################\n# Basic types\n############################\nclass AddrType(TypeBase):\n NONE = 0\n IPV4 = 1\n IPV6 = 2\n SVC = 3\n UNIX = 4 # For dispatcher socket\n\n\nclass ExtensionClass(TypeBase):\n \"\"\"\n Constants for two types of extensions. These values are shared with L4\n protocol values, and an appropriate value is placed in next_hdr type.\n \"\"\"\n HOP_BY_HOP = 0\n END_TO_END = 222 # (Expected:-) number for SCION end2end extensions.\n\n\nclass ExtHopByHopType(TypeBase):\n TRACEROUTE = 0\n SIBRA = 1\n SCMP = 2\n HORNET = 3\n\n\nclass ExtEndToEndType(TypeBase):\n PATH_TRANSPORT = 0\n PATH_PROBE = 1\n\n\nclass L4Proto(TypeBase):\n NONE = 0\n SCMP = 1\n TCP = 6\n UDP = 17\n SSP = 152\n L4 = SCMP, TCP, UDP, SSP\n\n\n############################\n# Payload class/types\n############################\nclass PayloadClass(TypeBase):\n PCB = 0\n IFID = 1\n CERT = 2\n PATH = 3\n SIBRA = 4\n\n\nclass CertMgmtType(TypeBase):\n CERT_CHAIN_REQ = 0\n CERT_CHAIN_REPLY = 1\n TRC_REQ = 2\n TRC_REPLY = 3\n\n\nclass PathMgmtType(TypeBase):\n \"\"\"\n Enum of path management packet types.\n \"\"\"\n REQUEST = 0\n REPLY = 1\n REG = 2 # Path registration (sent by Beacon Server).\n SYNC = 3 # For records synchronization purposes (used by Path Servers).\n REVOCATION = 4\n IFSTATE_INFO = 5\n IFSTATE_REQ = 6\n\n\nclass PathSegmentType(TypeBase):\n \"\"\"\n PathSegmentType class, indicates a type of path request/reply.\n \"\"\"\n UP = 0 # Request/Reply for up-paths\n DOWN = 1 # Request/Reply for down-paths\n CORE = 2 # Request/Reply for core-paths\n GENERIC = 3 # FIXME(PSz): experimental for now.\n\n\nclass PCBType(TypeBase):\n SEGMENT = 0\n\n\nclass IFIDType(object):\n PAYLOAD = 0\n\n\nclass SIBRAPayloadType(TypeBase):\n EMPTY = 0\n\n\n############################\n# Router types\n############################\nclass RouterFlag(TypeBase):\n ERROR = 0\n NO_PROCESS = 1\n # Process this locally\n PROCESS_LOCAL = 2\n # Forward packet to supplied IFID\n FORWARD = 3\n # Packet has reached its destination ISD-AS\n DELIVER = 4\n # Deliver packet even if it hasn't reached its destination ISD-AS\n FORCE_DELIVER = 5\n\n\n############################\n# SIBRA types\n############################\nclass SIBRAPathType(TypeBase):\n STEADY = 0\n EPHEMERAL = 1\n","sub_path":"lib/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"43443247","text":"import tempfile\n\nfrom django.conf 
import settings\nfrom django.core.files.uploadedfile import TemporaryUploadedFile\nfrom django.core.files.uploadhandler import TemporaryFileUploadHandler\n\n\nclass TemporaryUploadedGzipFile(TemporaryUploadedFile):\n def __init__(self, name, content_type, size, charset):\n if settings.FILE_UPLOAD_TEMP_DIR:\n file = tempfile.NamedTemporaryFile(suffix='.json.gz',\n dir=settings.FILE_UPLOAD_TEMP_DIR,\n delete=False)\n else:\n file = tempfile.NamedTemporaryFile(suffix='.json.gz', delete=False)\n super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset)\n\n\nclass TemporaryGzipFileUploadHandler(TemporaryFileUploadHandler):\n def new_file(self, file_name, *args, **kwargs):\n super(TemporaryFileUploadHandler, self).new_file(file_name, *args, **kwargs)\n self.file = TemporaryUploadedGzipFile(self.file_name, self.content_type, 0, self.charset)\n\n","sub_path":"warehouse/backup/uploadhandler.py","file_name":"uploadhandler.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"240577531","text":"from tweepy import Stream\r\nfrom tweepy import OAuthHandler\r\nfrom tweepy.streaming import StreamListener\r\nimport time\r\nimport json\r\nimport sys\r\nfrom datetime import datetime\r\nimport os\r\n\r\n# consumer key, consumer secret, access token, access secret.\r\nckey = \"\"\r\ncsecret = \"\"\r\natoken = \"\"\r\nasecret = \"\"\r\n\r\nfilter_list = ['bitcoin', 'ethereum']\r\n\r\n\r\ndef handle_new_lines():\r\n\t\"\"\"handle new lines on different OS\"\"\"\r\n\tif os.name == 'nt':\r\n\t\tn_line = '\\n'\r\n\telif os.name == 'posix':\r\n\t\tn_line = '\\r\\n'\r\n\telse:\r\n\t\tprint(\"Unknown OS.\")\r\n\t\tn_line = '\\n'\r\n\treturn n_line\r\n\r\n\r\ndef create_stop_file():\r\n\tos.chdir(os.path.dirname(os.path.realpath(__file__)))\r\n\tf = open(\"deleteMeToStopTweetCollection.txt\", \"w\")\r\n\tf.close()\r\n\r\n\r\nnew_line = handle_new_lines()\r\ncreate_stop_file()\r\n\r\n\r\nclass listener(StreamListener):\r\n\r\n\tdef on_data(self, data):\r\n\t\tif not os.path.isfile(\"deleteMeToStopTweetCollection.txt\"):\r\n\t\t\treturn False\r\n\t\telse:\r\n\t\t\ttry:\r\n\t\t\t\tparsed_data = json.loads(data)\r\n\t\t\t\ttweet = parsed_data['text'].replace('\\n', '')\r\n\t\t\t\ttimestamp = str(datetime.now())\r\n\t\t\t\tprint(timestamp)\r\n\r\n\t\t\t\tcontent_to_keep = '::::'.join([timestamp, tweet]) + new_line\r\n\t\t\t\tcontent_to_keep = content_to_keep.encode(\"utf-8\").decode(\"utf-8\")\r\n\t\t\t\t# '::::' is a delimiter that we don't expect to appear in tweet text\r\n\t\t\t\twith open('stream_data.txt', 'a', encoding=\"utf-8\") as f:\r\n\t\t\t\t\tf.write(content_to_keep)\r\n\t\t\t\treturn True\r\n\t\t\texcept Exception as e:\r\n\t\t\t\texc_type, exc_obj, exc_tb = sys.exc_info()\r\n\t\t\t\tprint(\r\n\t\t\t\t\tstr(e) + ' line: ' + str(exc_tb.tb_lineno) + ', ' +\r\n\t\t\t\t\tstr(datetime.now()) + new_line)\r\n\r\n\t\t\t\treturn True\r\n\r\n\tdef on_error(self, status):\r\n\t\tprint(status)\r\n\r\n\r\ndef run_streamer():\r\n\t\tauth = OAuthHandler(ckey, csecret)\r\n\t\tauth.set_access_token(atoken, asecret)\r\n\t\ttwitterStream = Stream(auth, listener())\r\n\t\ttwitterStream.filter(track=filter_list)\r\n\r\nif __name__ == '__main__':\r\n\trun_streamer()\r\n","sub_path":"tweet_collector.py","file_name":"tweet_collector.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"16215462","text":"\n\n#class 
header\nclass _UNTOUCHED():\n\tdef __init__(self,): \n\t\tself.name = \"UNTOUCHED\"\n\t\tself.definitions = [u'not changed or spoiled in any way: ', u'If food is untouched, it has not been eaten: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_untouched.py","file_name":"_untouched.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"118531473","text":"from django.db import models\nfrom django.contrib.auth.models import User\nimport datetime\nimport Levenshtein as L\n# Create your models here.\n\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n avatar = models.ImageField(upload_to=\"avatars/\")\n\n\nclass Session(models.Model): #date, browser_family, browser_version, ip, device, system_family, system_version\n date = models.DateTimeField() #mobile, tablet, touch_capable, pc, bot\n browser_family = models.TextField() # request.user_agent.browser.family\n browser_version = models.TextField() # request.user_agent.browser.version_string\n ip = models.GenericIPAddressField() #request.REMOTE_ADDR #for comparison check only these last ip numbers\n ##Below fields can't change from machine to machine\n device = models.TextField() # request.user_agent.device.family\n system_family = models.TextField() # request.user_agent.os.family\n system_version = models.TextField() # request.user_agent.os.version_string\n mobile = models.BooleanField() #request.user_agent.is_mobile\n tablet = models.BooleanField() #request.user_agent.is_tablet\n touch_capable = models.BooleanField() #request.user_agent.is_touch_capable\n pc = models.BooleanField() #request.user_agent.is_pc\n bot = models.BooleanField() #request.user_agent.is_bot\n #META['APPDATA']\n #META['COMPUTERNAME']\n #META['DRIVERDATA']\n ##other meta\n meta = models.TextField()\n\n\n\n\n##RETURNS AVATAR URL\ndef getAvatar(usr):\n return UserProfile.objects.get(user=usr).avatar.url\n\n##FINDS SESSION FROM COOKIES\n\n\n\ndef CreateSession(request):\n date = datetime.datetime.today()\n browser_family = request.user_agent.browser.family\n browser_version = request.user_agent.browser.version_string\n ip = request.META['REMOTE_ADDR']\n device = request.user_agent.device.family\n os_family = request.user_agent.os.family\n os_version = request.user_agent.os.version_string\n mobile = request.user_agent.is_mobile\n tablet = request.user_agent.is_tablet\n touch = request.user_agent.is_touch_capable\n pc = request.user_agent.is_pc\n bot = request.user_agent.is_bot\n meta = ''\n try:\n meta = request.META['CSRF_COOKIE']\n except:\n pass\n sess = Session.objects.get_or_create(\n date=date, browser_family=browser_family, browser_version=browser_version, ip=ip, device=device, system_family=os_family,\n system_version=os_version, mobile=mobile, tablet=tablet, touch_capable=touch, pc=pc, bot=bot, meta=meta\n )\n return sess\n\n\n\ndef sessRatio(s1, s2):\n ratio = L.seqratio([s1.browser_family, s1.device, s1.system_family, s1.system_version, s1.ip, s1.browser_version],\n [s2.browser_family, s2.device, s2.system_family, s2.system_version, s2.ip, s2.browser_version])\n ratio += boolRatio(s1.mobile==s2.mobile, s1.tablet==s2.tablet, s1.touch_capable==s2.touch_capable, s1.pc==s2.pc, 
s1.bot==s2.bot)\n if(len(s1.meta)>0 and len(s2.meta)>0):\n ratio += L.ratio(s1.meta, s2.meta)\n ratio /= 3\n else:\n ratio /= 2\n return ratio\n\n\ndef boolRatio(b1, b2, b3, b4, b5):\n dist = 0\n if(b1):\n dist+=1\n if(b2):\n dist+=1\n if(b3):\n dist+=1\n if(b4):\n dist+=1\n if(b5):\n dist+=1\n return dist/5\n","sub_path":"login/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"191471106","text":"#!/usr/bin/env python\n\"\"\"\nScript to (try and) extract all necessary data for a file from a storage element\nto be able to register this file in the DIRAC File Catalogue\nTakes a full path or a directory as input.\nNeeds valid grid UI:\nsource /cvmfs/grid.cern.ch/umd-c7ui-latest/etc/profile.d/setup-c7-ui-example.sh\nand a valid proxy:\nvoms-proxy-init --valid 24:00 --voms [your VO goes here]\nIf the command is really slow, you can try to force it to use Imperial College's\nbdii instead of the default. Before you start the script, do:\nexport LCG_GFAL_INFOSYS=localbdii.grid.hep.ph.ic.ac.uk:2170\nOutput:\nprints path, checksum (adler32) and size (bytes) to a file\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport stat\nimport sys\nimport argparse\nimport gfal2\n\ndef list_dir(cntxt, dir_name, fd, depth=0):\n \"\"\"lists all the files in a directory and its subdirectories\"\"\"\n if depth < 2:\n print(\"Processing: %s\" %dir_name)\n\n counter = 0\n subdirs = []\n n_of_tries = 0\n thing = None\n while n_of_tries < 3:\n try:\n content = cntxt.listdir(dir_name)\n subdirs = []\n for thing in content:\n fullpath = os.path.join(dir_name, thing)\n info = cntxt.stat(fullpath)\n if stat.S_ISDIR(info.st_mode):\n subdirs.append(fullpath)\n else:\n filesize = info.st_size\n filesum = cntxt.checksum(fullpath, \"adler32\")\n counter = counter+1\n if counter%100 == 0:\n print(\"Number of processed files: %s\" %counter)\n process_file(fullpath, filesize, filesum, fd)\n break\n except Exception as process_except:\n n_of_tries += 1\n print(\"Failed to process %s (%s), file: %s\" %(dir_name, process_except, thing))\n for d in subdirs:\n list_dir(cntxt, d, fd, depth=depth+1)\n\n\ndef single_file(cntxt, fullpath, outputfile):\n \"\"\"extracts information if a single file is given as input\"\"\"\n info = cntxt.stat(fullpath)\n filesize = info.st_size\n filesum = cntxt.checksum(fullpath, \"adler32\")\n process_file(fullpath, filesize, filesum, outputfile)\n\n\ndef process_file(filename, filesize, filesum, outputfile):\n \"\"\"format output\"\"\"\n outputfile.write(\"%s %s %u\\n\" %(filename, filesum, filesize))\n\n\ndef main():\n \"\"\"Definition of all arguments, help function, etc. Entry point to program.\"\"\"\n\n parser = argparse.ArgumentParser(description=\"List file size and checksum. 
Needs a valid grid UI and proxy.\",\n epilog=\"Example: ./extract_file_data.py -d srm://gfe02.grid.hep.ph.ic.ac.uk/pnfs/hep.ph.ic.ac.uk/data/gridpp/gridpp/user/daniela.bauer -o myfiles.txt\")\n parser.add_argument(\"-d\", \"--directory\", help=\"full path (including storage element name) to a directory\")\n parser.add_argument(\"-f\", \"--filename\", help=\"full path (including storage element name) to a file\")\n req_grp = parser.add_argument_group(title='required arguments')\n req_grp.add_argument('-o', \"--output\", required=True, help=\"output file name\")\n args = parser.parse_args()\n # 1 is the program itself, how could I forget\n if len(sys.argv) != 5:\n print(\"Please specify [either a directory or a file] and the output file for the results.\")\n sys.exit(0)\n\n file_descriptor = open(args.output, \"w\")\n ctxt = gfal2.creat_context()\n\n if args.directory:\n # print(args.directory)\n list_dir(ctxt, args.directory, file_descriptor)\n elif args.filename:\n single_file(ctxt, args.filename, file_descriptor)\n else:\n print(\"Something went wrong.\")\n\n file_descriptor.close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"user/extract_file_data.py","file_name":"extract_file_data.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"262787831","text":"class ListADT:\r\n def __init__(self, size ):\r\n \"\"\"\r\n this function initialize the class List Adt\r\n param the new class defined and size of the list\r\n return none\r\n pre none\r\n post a list class have been created\r\n complexity best and worst case: O(1)\r\n \"\"\" \r\n self.the_array = [None]*size\r\n self.length = 0\r\n\r\n def __str__(self):\r\n \"\"\"\r\n this function returns string of the array\r\n param the list class\r\n return string of the list\r\n pre none\r\n post the string of the list will be forwarded\r\n complexity best and worst case: O(n), where n = size of list\r\n \"\"\"\r\n string_return = \"\"\r\n for index in range(self.length):\r\n string_return += str(self.the_array[index])\r\n string_return +=\"\\n\"\r\n \r\n return string_return\r\n\r\n def __len__ (self):\r\n \"\"\"\r\n this function returns the length of the array\r\n param the list class\r\n return length\r\n pre none\r\n post the length returned\r\n complexity best and worst case: O(1)\r\n \"\"\"\r\n return self.length\r\n \r\n def __getitem__ (self, index):\r\n \"\"\"\r\n this function will return the item \r\n param the list class, index of item wanted to be returned\r\n return the item in the index of index\r\n pre none\r\n post item returned\r\n complexity best and worst case: O(1)\r\n \"\"\"\r\n if (index < 0):\r\n index = self.length + index\r\n if (index < 0):\r\n raise IndexError (\"Index Out Of Range\")\r\n else:\r\n return self.the_array[index]\r\n elif(index > self.length):\r\n raise IndexError (\"Index over than length\")\r\n else:\r\n return self.the_array[index]\r\n \r\n def __setitem__ (self, index, item):\r\n \"\"\"\r\n this function will put an item into the list \r\n param the list class, index and the item to be placed\r\n return none\r\n pre none\r\n post item is initialize\r\n complexity best and worst case: O(1)\r\n \"\"\"\r\n if (index < 0):\r\n index = self.length + index\r\n if(index < 0):\r\n raise IndexError (\"Index Out Of Range\")\r\n else:\r\n self.the_array[index] = item\r\n \r\n elif(index > self.length):\r\n raise IndexError (\"Index over than length\")\r\n else:\r\n self.the_array[index] = 
item\r\n\r\n\r\n\r\n def __eq__(self, other):\r\n \"\"\"\r\n this function will test if the two list is equivalent\r\n param the list class, the thing wanted to be compared with\r\n return true or false (equivalent or not)\r\n pre none\r\n post none\r\n complexity best and worst case: O(mn), where m is the length of self list \r\n and n is the length of other\r\n \"\"\"\r\n return str(self)==str(other)\r\n\r\n\r\n def insert(self, index, item):\r\n \"\"\"\r\n this function will insert item into the list \r\n param the list class, index and the item to be placed\r\n return none\r\n pre none\r\n post item is initialize into the array\r\n complexity best and worst case: O(n), n is equal to the length of the array \r\n minus the index required\r\n \"\"\"\r\n if self.is_empty():\r\n self.append(item)\r\n elif self.is_full():\r\n raise Exception(\"List is full\")\r\n elif(index < 0):\r\n index = self.length + index + 1\r\n if (index < 0):\r\n raise IndexError(\"Index Out Of Range\")\r\n else:\r\n for i in range(self.length - index):\r\n self.the_array[self.length - i] = self.the_array[self.length - i -1]\r\n self.the_array[index] = item\r\n self.length += 1\r\n \r\n elif(index > self.length):\r\n raise IndexError (\"Index over than length\")\r\n else:\r\n for i in range(self.length - index):\r\n self.the_array[self.length - i] = self.the_array[self.length - i -1]\r\n self.the_array[index] = item\r\n self.length += 1\r\n \r\n\r\n \r\n \r\n def delete(self, index):\r\n \"\"\"\r\n this function will delete an item \r\n param the list class, index where the item wanted to be deleted\r\n return none\r\n pre none\r\n post item is deletd in the array\r\n complexity best and worst case: O(n), n is equal to the length of the array \r\n minus the index required\r\n \"\"\"\r\n if self.is_empty():\r\n print(\"List is empty\")\r\n elif(index < 0):\r\n index = self.length + index\r\n if (index < 0):\r\n raise IndexError (\"Index out of range\")\r\n else:\r\n for i in range(self.length - index - 1):\r\n self.the_array[index + i] = self.the_array[index + i + 1]\r\n self.length -= 1\r\n self.the_array[self.length] = None \r\n elif(index > self.length):\r\n raise IndexError (\"Index over than length\")\r\n else:\r\n for i in range(self.length - index - 1):\r\n self.the_array[index + i] = self.the_array[index + i + 1]\r\n self.length -= 1\r\n self.the_array[self.length] = None\r\n \r\n\r\n def is_empty(self):\r\n return self.length == 0\r\n\r\n def is_full(self):\r\n return self.length == len(self.the_array)\r\n\r\n def __contains__(self, item):\r\n for i in range(self.length):\r\n if item == self.the_array[i]:\r\n return True\r\n return False\r\n \r\n def append(self, item):\r\n if not self.is_full():\r\n self.the_array[self.length] = item\r\n self.length +=1\r\n else:\r\n raise Exception('List if Full')\r\n\r\n def unsafe_set_array(self,array,length):\r\n \"\"\"\r\n UNSAFE: only to be used during testing to facilitate it!! 
DO NOT USE FOR ANYTHING ELSE\r\n \"\"\"\r\n if 'test' not in __name__:\r\n raise Exception('Not runnable')\r\n\t\t\t\r\n self.the_array = array\r\n self.length = length\r\n","sub_path":"GIT/Data Structures/Lists/List.py","file_name":"List.py","file_ext":"py","file_size_in_byte":6663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"73457980","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom cohesity_app_sdk.configuration import Configuration\r\n\r\n\r\nclass CustomAuth:\r\n\r\n @classmethod\r\n def apply(cls, http_request):\r\n \"\"\" Add CustomAuth authentication to the request.\r\n\r\n Args:\r\n http_request (HttpRequest): The HttpRequest object to which\r\n authentication header will be added.\r\n\r\n \"\"\"\r\n token = Configuration.app_auth_token\r\n http_request.headers['Authorization'] = \"Bearer {}\".format(token)\r\n","sub_path":"cohesity_app_sdk/http/auth/custom_auth.py","file_name":"custom_auth.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"60959368","text":"# -*- coding: utf-8 -*-\n\nimport sys\nfrom gerarLista import gera_lista\n\nfrom nltk.classify.util import apply_features\nimport subprocess\nfrom getFeaturesVector import *\nimport sys\nfrom normalizar_Colecao import *\n\nfrom nltk import wordpunct_tokenize\nfrom nltk.corpus import stopwords\n\nfeatureList=[]\nmsg_input=[]\nlista_msg=[]\nmessage=[]\nlista_feature_fell=[]\n\nadvNeg=['nao','tampouco','tambem nao','nunca','negativamente','jamais','de modo algum','de jeito nenhum','de forma nenhuma']\nlistaAdvNeg=[]\n\ndef busca_AdvNeg(word):\n for w in word:\n if (w in advNeg):\n listaAdvNeg.append(w)\n return listaAdvNeg\n\ndef extract_features(message):\n lista_msg=message\n features={}\n for word in featureList:\n features['contains(%s)' %word] = (word in lista_msg)\n #print(\"lista_features[ contain(%s) ]= %s\" %(word,word in lista_msg))\n return features\n\n# Gerar lista de caracteristicas de cada colecao: Fatec, Dilma, Copa e Palmeiras\ndef gera_lista_features(tema): \n #all_features=[]\n features=[]\n listaColecao = tema\n i=1\n y=0\n listaMsg=[]\n listaFell=[]\n # Chamando gera_lista \n lista=gera_lista(listaColecao)\n #Separa a mensagem e o sentimento\n for x in lista:\n if(i%2 == 1):\n listaMsg.append(x)\n # print(\"Mensagem - %s \"%x)\n else:\n listaFell.append(x)\n # print (\"Sentimento - %s \"%x)\n i+=1\n while ( y < len(listaMsg)): \n features = getAllFeatures(listaMsg[y],features) # Todas palavras relevantes/caracteristicas da mensagem\n featureVector = getFeatureVector(listaMsg[y])\n lista_feature_fell.append((featureVector,listaFell[y]))\n y+=1\n return features\n\ndef get_lista_feature_fell():\n return lista_feature_fell\n\ndef get_feature_list():\n return featureList\n\ndef avaliar_Sentimento(message,training):\n training_set = training\n #print (\"\\n\\tTraining_set -> %s\\n\"%training)\n classifier= nltk.NaiveBayesClassifier.train(training_set)\n #print(classifier.show_most_informative_features(300))\n #print (\"\\n\\tSentimento Provavel: %s \\n\"%(classifier.classify(extract_features(message))))\n valor= classifier.classify(extract_features(message))\n return valor\n \n\n\ndef _calculate_languages_ratios(text):\n \"\"\"\n Calculate probability of given text to be written in several languages and\n return a dictionary that looks like {'french': 2, 'spanish': 4, 'english': 0}\n \n @param text: Text whose language want to be detected\n @type text: str\n \n 
@return: Dictionary with languages and unique stopwords seen in analyzed text\n @rtype: dict\n \"\"\"\n\n languages_ratios = {}\n\n '''\n nltk.wordpunct_tokenize() splits all punctuations into separate tokens\n \n >>> wordpunct_tokenize(\"That's thirty minutes away. I'll be there in ten.\")\n ['That', \"'\", 's', 'thirty', 'minutes', 'away', '.', 'I', \"'\", 'll', 'be', 'there', 'in', 'ten', '.']\n '''\n\n tokens = wordpunct_tokenize(text)\n words = [word.lower() for word in tokens]\n\n # Compute per language included in nltk number of unique stopwords appearing in analyzed text\n for language in stopwords.fileids():\n if (language == \"portuguese\"):\n lista=stopwords.words(language)\n lista.append('Fatec')\n lista.append('fatec')\n lista.append('Palmeiras')\n lista.append('palmeiras')\n lista.append('Dilma')\n lista.append('dilma')\n lista.append('Copa')\n lista.append('copa')\n stopwords_set=set(lista)\n else: \n stopwords_set = set(stopwords.words(language))\n words_set = set(words)\n common_elements = words_set.intersection(stopwords_set)\n languages_ratios[language] = len(common_elements) # language \"score\"\n\n return languages_ratios\n\n\n\ndef detect_language(text):\n \"\"\"\n Calculate probability of given text to be written in several languages and\n return the highest scored.It uses a stopwords based approach, counting how many unique stopwords\n are seen in analyzed text.@param text: Text whose language want to be detected\n @type text: str\n @return: Most scored language guessed\n @rtype: str\n \"\"\"\n\n ratios = _calculate_languages_ratios(text)\n\n most_rated_language = max(ratios, key=ratios.get)\n\n return most_rated_language\n\n#########################################################################################\n\ndef avaliacao_final(adv_neg, naive):\n #print(\"\\n\\tVindo do naive: %s \"%naive)\n #print(\"\\n\\tadv_neg: %d \"%len(adv_neg))\n \n if (len(adv_neg) > 0 and naive == \"negativo\"):\n print (\"\\n\\tSentimento Provavel: positivo\")\n if (len(adv_neg) > 0 and naive == \"positivo\"):\n print (\"\\n\\tSentimento Provavel: negativo\")\n if(len(adv_neg) == 0 and naive == \"negativo\"):\n print (\"\\n\\tSentimento Provavel: negativo\")\n if(len(adv_neg) == 0 and naive == \"positivo\"):\n print (\"\\n\\tSentimento Provavel: positivo\") \n\n \n\nif __name__ == '__main__':\n if (len(sys.argv) == 3 and (sys.argv[1] != '') and (sys.argv[2] == 'fatec' or sys.argv[2] == 'dilma' or sys.argv[2] == 'copa' or sys.argv[2] == 'palmeiras')):\n # Limpar a tela\n #subprocess.call(\"clear\")\n listaColecao = sys.argv[2]\n print (\"\\n\\t\\tAnálise de Sentimento\\n\\t\\tAssunto: %s \"%listaColecao.upper())\n # Gera a lista de caracteristicas usada no metodo extract_features\n featureList = gera_lista_features(listaColecao)\n print (\"\\n\\tCaracteristicas conhecidas:\\n\\t%s \"%(featureList))\n lista_feature_fell = get_lista_feature_fell()\n #print(\"\\n\\tCaracteristica / Sentimento:\\n\\t %s\"%lista_feature_fell)\n tema = listaColecao\n msg=sys.argv[1]\n nTermos = len(msg.split()) # Conta quantos termos tem a frase de entrada\n language = detect_language(msg)\n #print (\"\\n\\tLingua: %s \"%language) \n if ( language == 'portuguese' and nTermos > 1):\n print(\"\\n\\tAnalisar Msg: %s \"%msg.capitalize())\n msg2 = normalizar(msg)\n #print(\"\\n\\tNormalizado: %s \"%msg2)\n lista_msg=getFeatureVector(msg2)\n print (\"\\n\\tCaracteristicas da Msg - %s \"%lista_msg)\n message=lista_msg\n adv_neg = busca_AdvNeg(message)\n #if(len(adv_neg) > 0):\n # print (\"\\n\\tAdverbios de 
Negacao: %s\\n \"%adv_neg)\n training_set = apply_features(extract_features,lista_feature_fell)\n print(training_set)\n # Evaluate the message\n retorno_naive = avaliar_Sentimento(message,training_set)\n avaliacao_final(adv_neg,retorno_naive)\n print(\"\\n\\n\")\n else:\n print (\"\\n\\tPor favor insira o texto novamente\\n\\n\")\n else:\n print ('\\nUsage: python testarMsg.py msg fatec|dilma|copa|palmeiras\\n')\n","sub_path":"versao2/testarMsg.py","file_name":"testarMsg.py","file_ext":"py","file_size_in_byte":6893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"83624594","text":"#!/usr/bin/env python3\n\n# original by charles leifer: https://charlesleifer.com/blog/using-python-and-k-means-to-find-the-dominant-colors-in-images/\nfrom collections import namedtuple\nfrom math import sqrt\nimport random\ntry:\n import Image\nexcept ImportError:\n from PIL import Image\nimport shelve\nfrom os import path\nimport os\n\nMY_PATH = path.abspath(path.dirname(__file__))\nDATA_SHELVE_DIR = path.join(MY_PATH, \"shelves\")\nDOMINANT_COLORS_SHELVE = path.join(DATA_SHELVE_DIR, \"dominant_colors.shelve\") # for each imagepath, contains a list of int-encoded colors\nos.makedirs(DATA_SHELVE_DIR, exist_ok=True)\n\nPoint = namedtuple('Point', ('coords', 'n', 'ct'))\nCluster = namedtuple('Cluster', ('points', 'center', 'n'))\n\ndef get_points(img):\n points = []\n w, h = img.size\n for count, color in img.getcolors(w * h):\n points.append(Point(color, 3, count))\n return points\n\nrtoh = lambda rgb: '#%s' % ''.join(('%02x' % p for p in rgb))\n\ndef colorz(filename, n=3):\n img = Image.open(filename)\n img.thumbnail((200, 200))\n\n # mainly needed for gifs\n rgb_img = img.convert('RGB')\n img = rgb_img\n\n w, h = img.size\n\n points = get_points(img)\n try:\n clusters = kmeans(points, n, 1)\n except ValueError as e:\n print(\"Encountered ValueError in kmeans because there are not enough colors for making {} clusters.\\nUsing only one cluster to be on the safe side...\".format(n))\n clustersx = kmeans(points, k=1, min_diff=1)\n # use the clustersx n times\n clusters = clustersx * n\n rgbs = [map(int, c.center.coords) for c in clusters]\n return map(rtoh, rgbs)\n\ndef euclidean(p1, p2):\n return sqrt(sum([\n (p1.coords[i] - p2.coords[i]) ** 2 for i in range(p1.n)\n ]))\n\ndef calculate_center(points, n):\n vals = [0.0 for i in range(n)]\n plen = 0\n for p in points:\n plen += p.ct\n for i in range(n):\n vals[i] += (p.coords[i] * p.ct)\n return Point([(v / plen) for v in vals], n, 1)\n\ndef kmeans(points, k, min_diff):\n clusters = [Cluster([p], p, p.n) for p in random.sample(points, k)]\n\n while 1:\n plists = [[] for i in range(k)]\n\n for p in points:\n smallest_distance = float('Inf')\n for i in range(k):\n distance = euclidean(p, clusters[i].center)\n if distance < smallest_distance:\n smallest_distance = distance\n idx = i\n plists[idx].append(p)\n\n diff = 0\n for i in range(k):\n old = clusters[i]\n center = calculate_center(plists[i], old.n)\n new = Cluster(plists[i], center, old.n)\n clusters[i] = new\n diff = max(diff, euclidean(old.center, new.center))\n\n if diff < min_diff:\n break\n\n return clusters\n\ndef str_color_to_int(colo):\n if colo.startswith('#'):\n colo = colo[1:]\n i = int(colo, 16)\n if i >= 2**23:\n i -= 2**24\n return i\n\ndef int_to_color_str(int_colo):\n return '#%06x'%((int_colo+2**24)%2**24)\n\ndef extract_color_information(imagepath, num_colors=5, shelfpath=DOMINANT_COLORS_SHELVE, reuse=True, verbose=True):\n with 
shelve.open(shelfpath) as shelf:\n fpath = path.normpath(path.abspath(imagepath))\n if verbose:\n print(\"Extracting color info from {}\".format(fpath))\n if reuse and fpath in shelf.keys():\n return # we already have the colors\n shelf[fpath] = list(map(str_color_to_int, colorz(imagepath, n=num_colors)))\n\ndef setup_for_all_dirs(training_dirs):\n for dirpath in training_dirs:\n files_in_dirpath = os.listdir(dirpath)\n for f in files_in_dirpath:\n try:\n extract_color_information(path.join(dirpath, f))\n except OSError:\n print(\"Cannot open {} for reading color values\".format(f))\n","sub_path":"ambiente/dominance.py","file_name":"dominance.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"338376381","text":"from DateTime import DateTime\nfrom AccessControl import ClassSecurityInfo\nfrom Products.CMFCore.utils import UniqueObject\nfrom Products.CMFCore.permissions import ListFolderContents, \\\n ModifyPortalContent, View\nfrom plone.app import folder\nfrom Products.Archetypes.public import *\nfrom bika.lims.content.organisation import Organisation\nfrom bika.lims.config import ManageBika, I18N_DOMAIN, PROJECTNAME\nfrom bika.lims import bikaMessageFactory as _\n\nschema = Organisation.schema.copy() + Schema((\n IntegerField('Confidence',\n schemata = 'Accreditation',\n widget = IntegerWidget(\n label = _(\"Confidence Level %\"),\n description = _(\"This value is reported at the bottom of all published results\"),\n ),\n ),\n StringField('LabURL',\n schemata = 'Address',\n write_permission = ManageBika,\n widget = StringWidget(\n size = 60,\n label = _(\"Lab URL\"),\n description = _(\"The Laboratory's web address\"),\n ),\n ),\n BooleanField('LaboratoryAccredited',\n default = True,\n schemata = 'Accreditation',\n write_permission = ManageBika,\n widget = BooleanWidget(\n label = _(\"Laboratory Accredited\"),\n description = _(\"Check this box if your laboratory is accredited\"),\n ),\n ),\n StringField('AccreditationBody',\n schemata = 'Accreditation',\n write_permission = ManageBika,\n widget = StringWidget(\n label = _(\"Accreditation Body Abbreviation\"),\n description = _(\"E.g. SANAS, APLAC, etc.\"),\n ),\n ),\n StringField('AccreditationBodyLong',\n schemata = 'Accreditation',\n write_permission = ManageBika,\n widget = StringWidget(\n size = 60,\n label = _(\"Accreditation Body\"),\n description = _(\"The name of the accreditation body corresponding to the abbreviation above, \"\n \" e.g. South African National Accreditation Service for SANAS\"),\n ),\n ),\n StringField('AccreditationBodyURL',\n schemata = 'Accreditation',\n write_permission = ManageBika,\n widget = StringWidget(\n label = _(\"Accreditation Body URL\"),\n description = _(\"Web address for the accreditation body\"),\n ),\n ),\n StringField('Accreditation',\n schemata = 'Accreditation',\n write_permission = ManageBika,\n widget = StringWidget(\n label = _(\"Accreditation\"),\n description = _(\"The accreditation standard that applies, e.g. 
ISO 17025\"),\n ),\n ),\n StringField('AccreditationReference',\n schemata = 'Accreditation',\n write_permission = ManageBika,\n widget = StringWidget(\n label = _(\"Accreditation Reference\"),\n description = _(\"The reference code issued to the lab by the accreditation body\"),\n ),\n ),\n))\n\n\nIdField = schema['id']\nIdField.widget.visible = {'edit':'hidden', 'view': 'invisible'}\n\nschema['Name'].validators = ()\n# Update the validation layer after change the validator in runtime\nschema['Name']._validationLayer()\n\nclass Laboratory(UniqueObject, Organisation):\n security = ClassSecurityInfo()\n schema = schema\n\n # XXX: Temporary workaround to enable importing of exported bika\n # instance. If '__replaceable__' is not set we get BadRequest, The\n # id is invalid - it is already in use.\n __replaceable__ = 1\n\n security.declareProtected(View, 'getSchema')\n def getSchema(self):\n return self.schema\n\nregisterType(Laboratory, PROJECTNAME)\n\n","sub_path":"bika/lims/content/laboratory.py","file_name":"laboratory.py","file_ext":"py","file_size_in_byte":3604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"107040890","text":"import cv2\nimport glob\nimport os\nfrom tqdm import tqdm\n\ndef videoToImages(videoFileName, outputFolder, interval = 1):\n \"\"\" The function videoToImages saves each n'th frame of a video as an image\n Input arguments are:\n videoFileName: Path to the video file to be processed\n outputFolder: Path of the folder where the images will be saved\n interval: number of frames to be skipped between each frame write, Default is 1 \"\"\"\n\n cap = cv2.VideoCapture(videoFileName) # Open the video file\n counter = 0\n length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) # Get total frames\n for counter in tqdm(range(length), ncols = 110, desc = \"Splitting Video : {} , Progress ->\".format(videoFileName)):\n _, frame = cap.read()\n\n if counter%interval==0:\n # Write image to disk\n cv2.imwrite (os.path.join(outputFolder, os.path.splitext(os.path.basename (videoFileName))[0]+\"_\"+str(int(counter/interval)).zfill(5)+\".jpg\"), frame)\n \n # Cleanup\n cap.release()\n\nvideosFolder = r\"..\\videos\\train\" # Folder location on disk where video files are stored\noutputFolder = r\"..\\images\\train\" # Folder location on disk where the image frames will be stored\nvideoFiles = glob.glob(videosFolder+\"/*\")\n\n# Create output folder if it does not exists\nif not os.path.exists(outputFolder):\n os.mkdir(outputFolder)\n\nfor videoFileName in videoFiles:\n videoToImages(videoFileName, outputFolder, 20)\n","sub_path":"helperCode/vidToImages.py","file_name":"vidToImages.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"131953123","text":"import sqlite3\r\nfrom _datetime import datetime\r\nimport re\r\nimport Family\r\nimport pandas as pd\r\nimport GEDCOM_parser\r\nimport operator\r\n\r\nconn = sqlite3.connect('DATA_Sprint3.db')\r\nconn.text_factory = str\r\n\r\nindi_table = pd.read_sql_query('SELECT * FROM INDIVIDUAL', conn)\r\nfam_table = pd.read_sql_query('SELECT * FROM FAMILY', conn)\r\nprint('INDIVIDUAL TABLE')\r\nprint(indi_table)\r\nprint('FAMILY TABLE')\r\nprint(fam_table)\r\n\r\n#Pranit Kulkarni\r\ndef US18():\r\n query = \"SELECT HUSBAND_ID,WIFE_ID FROM FAMILY\"\r\n\r\n def getFamilyId(id):\r\n query = \"SELECT ID FROM FAMILY WHERE CHILDREN LIKE '%\" + id + \"%' \"\r\n result = conn.execute(query)\r\n cursor = 
result.fetchone()\r\n if (cursor == None):\r\n return None\r\n # print(\"Family ID found: \"+cursor[0]+\" for \"+id)\r\n return cursor[0]\r\n\r\n def US18_main():\r\n result = conn.execute(query)\r\n rows = result.fetchall()\r\n\r\n for row in rows:\r\n husband_id = row[0]\r\n wife_id = row[1]\r\n\r\n husband_family_id = getFamilyId(husband_id)\r\n wife_family_id = getFamilyId(wife_id)\r\n\r\n if (husband_family_id == wife_family_id and husband_family_id != None):\r\n print(\"ERROR: US18: \" + husband_id + \" and \" + wife_id + \" who are married are siblings\")\r\n\r\n US18_main()\r\n\r\n\r\ndef US09():\r\n query = \"SELECT HUSBAND_ID,WIFE_ID FROM FAMILY\"\r\n\r\n date_format = \"%d %b %Y\"\r\n\r\n def getDeathDate(id):\r\n # print(id)\r\n query = \"SELECT DEATH FROM INDIVIDUAL WHERE ALIVE = 'False' AND ID = ?\", (id,)\r\n result = conn.execute(\"SELECT DEATH FROM INDIVIDUAL WHERE ALIVE = 'False' AND ID = ?\", (id,))\r\n row = result.fetchone()\r\n\r\n if row == None:\r\n return None\r\n\r\n return row[0]\r\n\r\n def formatChildrenData(siblings):\r\n punctuation = [\"{\", \"}\", \",\"]\r\n for characters in punctuation:\r\n siblings = siblings.replace(characters, \" \").strip()\r\n childrenData = siblings.split(\" \")\r\n return childrenData\r\n\r\n def getChildBirthDate(id):\r\n # query = \"SELECT BIRTH FROM INDIVIDUAL WHERE ID = \"+id\r\n cursor = conn.execute(\"SELECT BIRTHDAY FROM INDIVIDUAL WHERE ID = ?\", (id,))\r\n result = cursor.fetchone()\r\n\r\n if result == None:\r\n return None\r\n\r\n birthdate = result[0]\r\n\r\n return birthdate\r\n\r\n def calculateDifference(child_birth_date, father_death_date, mother_death_date):\r\n if child_birth_date != None:\r\n birthdate = datetime.strptime(child_birth_date, date_format).date()\r\n\r\n if mother_death_date != None:\r\n deathdate = datetime.strptime(mother_death_date, date_format).date()\r\n\r\n if birthdate > deathdate:\r\n print(\"ERROR: US09: \")\r\n\r\n def US09_main():\r\n result = conn.execute(query)\r\n rows = result.fetchall()\r\n\r\n for row in rows:\r\n father_id = row[0]\r\n mother_id = row[1]\r\n\r\n father_death_date = getDeathDate(father_id)\r\n mother_death_date = getDeathDate(mother_id)\r\n\r\n # query = \"SELECT CHILDREN FROM FAMILY WHERE HUSBAND_ID = \"+father_id\r\n result = conn.execute(\"SELECT CHILDREN FROM FAMILY WHERE HUSBAND_ID = ?\", (father_id,))\r\n cursor = result.fetchone()\r\n children = cursor[0]\r\n\r\n childrenList = formatChildrenData(children)\r\n\r\n if len(childrenList) > 0:\r\n for child in childrenList:\r\n child_birth_date = getChildBirthDate(child)\r\n\r\n if child_birth_date != None:\r\n birthdate = datetime.strptime(child_birth_date, date_format).date()\r\n\r\n if mother_death_date != None:\r\n deathdate = datetime.strptime(mother_death_date, date_format).date()\r\n\r\n if birthdate > deathdate:\r\n print(\"ERROR: US09: \" + child + \" is born after the death of his mother \" + mother_id)\r\n\r\n if father_death_date != None:\r\n deathdate = datetime.strptime(father_death_date, date_format).date()\r\n # difference = monthdelta(birthdate,deathdate) # Gets difference in months..\r\n difference = deathdate - birthdate\r\n months = difference.days / 30\r\n\r\n if deathdate < birthdate and months > 9:\r\n print(\r\n \"ERROR: US09: \" + child + \" is born 9 months after the death of his father \" + father_id)\r\n\r\n US09_main()\r\n\r\n\r\n#Aakanksha Gokhe\r\ndef US28():\r\n FAM_ID = []\r\n Children_ID = []\r\n #conn = sqlite3.connect('DATA.db')\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT 
ID,CHILDREN FROM FAMILY\")\r\n rows = cur.fetchall()\r\n print(\"US28-\")\r\n for fid, chld in rows:\r\n FAM_ID.append(re.sub(r'[^@,0-9a-zA-Z ]+', '', str(fid)))\r\n Children_ID.append(re.sub(r'[^@,0-9a-zA-Z ]+', '', str(chld)))\r\n for pos, id in enumerate(FAM_ID):\r\n sib_dict = {}\r\n # name = []\r\n sib_list = Children_ID[pos].split(',')\r\n # print(sib_list)\r\n if (len(sib_list) > 1):\r\n for sid in sib_list:\r\n cur.execute(\"SELECT NAME,AGE FROM INDIVIDUAL WHERE ID=?\", (sid,))\r\n rows1 = cur.fetchall()\r\n # name.append(re.sub(r'[^A-Za-z ]+','',str(rows1[0][0])))\r\n age = int(re.sub(r'[^0-9]', '', str(rows1[0][1])))\r\n sib_dict[sid] = age\r\n # print(name)\r\n sorted_sib_dict = sorted(sib_dict.items(), key=operator.itemgetter(1), reverse=True)\r\n # print(sorted_sib_dict)\r\n print(\"FOR FAMILY ID - \", id)\r\n for pos, info in enumerate(sorted_sib_dict):\r\n cur.execute(\"SELECT NAME FROM INDIVIDUAL WHERE ID=?\", (info[0],))\r\n rn = cur.fetchall()\r\n name = re.sub(r'[^A-Za-z ]+', '', str(rn[0]))\r\n print(name, info[1])\r\n\r\ndef US06():\r\n HID = []\r\n WID = []\r\n Div_date = []\r\n\r\n def compare_dates(husb_death, wife_death, divorce):\r\n if (husb_death == 'NA'):\r\n wife_death = datetime.strptime(str(wife_death), \"%d %b %Y\")\r\n wife_death = datetime.date(wife_death)\r\n divorce = datetime.strptime(str(divorce), \"%d %b %Y\")\r\n divorce = datetime.date(divorce)\r\n if (divorce > wife_death):\r\n return True\r\n else:\r\n return False\r\n elif (wife_death == 'NA'):\r\n husb_death = datetime.strptime(str(husb_death), \"%d %b %Y\")\r\n husb_death = datetime.date(husb_death)\r\n divorce = datetime.strptime(str(divorce), \"%d %b %Y\")\r\n divorce = datetime.date(divorce)\r\n if (divorce > husb_death):\r\n return True\r\n else:\r\n return False\r\n else:\r\n husb_death = datetime.strptime(str(husb_death), \"%d %b %Y\")\r\n husb_death = datetime.date(husb_death)\r\n wife_death = datetime.strptime(str(wife_death), \"%d %b %Y\")\r\n wife_death = datetime.date(wife_death)\r\n divorce = datetime.strptime(str(divorce), \"%d %b %Y\")\r\n divorce = datetime.date(divorce)\r\n if (divorce > husb_death or divorce > wife_death):\r\n return True\r\n else:\r\n return False\r\n\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT HUSBAND_ID,WIFE_ID,DIVORCED FROM FAMILY\")\r\n rows = cur.fetchall()\r\n for hid, wid, divdt in rows:\r\n HID.append(re.sub(r'[^@0-9a-zA-Z ]+', '', str(hid)))\r\n WID.append(re.sub(r'[^@0-9A-Za-z]+', '', str(wid)))\r\n Div_date.append(re.sub(r'[^0-9a-zA-Z ]+', '', str(divdt)))\r\n\r\n for pos, id in enumerate(HID):\r\n cur.execute(\"SELECT DEATH FROM INDIVIDUAL WHERE ID=?\", (id,))\r\n info = cur.fetchall()\r\n cur.execute(\"SELECT DEATH FROM INDIVIDUAL WHERE ID=?\", (WID[pos],))\r\n info1 = cur.fetchall()\r\n death_date_husb = re.sub(r'[^0-9a-zA-Z ]', '', str(info[0]))\r\n death_date_wife = re.sub(r'[^0-9a-zA-Z ]', '', str(info1[0]))\r\n if (Div_date[pos] != 'NA'):\r\n result = compare_dates(death_date_husb, death_date_wife, Div_date[pos])\r\n if (result):\r\n print(\"ERROR: US06: \", id, \" and \", WID[pos], \" are divorced after the death of either spouse\")\r\n\r\n#Shreyas Sule\r\ndef US20():\r\n result = conn.execute(\"SELECT ID,CHILDREN from FAMILY\")\r\n data = result.fetchall()\r\n\r\n alreadySearched = []\r\n\r\n def formatChildrenData(siblings):\r\n punctuation = [\"{\", \"}\", \",\", \"(\", \")\"]\r\n for characters in punctuation:\r\n siblings = siblings.replace(characters, \" \").strip()\r\n childrenData = siblings.split(\" \")\r\n return 
childrenData\r\n\r\n def us20():\r\n\r\n for family in data:\r\n famId = family[0]\r\n children = family[1]\r\n\r\n siblings = formatChildrenData(children)\r\n if (len(siblings) >= 1):\r\n for indi_id in siblings:\r\n flag = 0\r\n # Get Parents of the current child\r\n parent_query = conn.execute(\r\n \"SELECT HUSBAND_ID,WIFE_ID from FAMILY where ID in (SELECT CHILD from INDIVIDUAL where ID = ?)\",\r\n (indi_id,))\r\n myParents = parent_query.fetchone()\r\n\r\n my_parents_siblings = [] # this list will contain all the aunts and uncles of the current child\r\n if (myParents != None):\r\n father = myParents[0]\r\n mother = myParents[1]\r\n\r\n # get the id of family where my father is the child\r\n father_family_query = conn.execute(\"SELECT CHILD from INDIVIDUAL where ID = ?\", (father,))\r\n father_family = father_family_query.fetchall()\r\n father_family = [i[0] for i in father_family]\r\n\r\n if (father_family[0] != 'NA'):\r\n for fatherId in father_family:\r\n\r\n # Get siblings of my father i.e my aunts and uncles from father's side\r\n siblingsOfFatherQuery = conn.execute(\"SELECT CHILDREN from FAMILY where ID = ?\",\r\n (fatherId,))\r\n siblingsOfFather = siblingsOfFatherQuery.fetchall()\r\n siblingsOfFather = [i[0] for i in siblingsOfFather]\r\n siblingsOfFather = re.sub(r'[^@I0-9,]', '', siblingsOfFather[0])\r\n siblingsOfFather = siblingsOfFather.split(\",\")\r\n\r\n # Store all siblings of father except father himself.\r\n for fathersiblingId in siblingsOfFather:\r\n if (fathersiblingId != fatherId):\r\n my_parents_siblings.append(fathersiblingId)\r\n\r\n # Get siblings of my mother i.e my aunts and uncles from mother's side.\r\n mother_family_query = conn.execute(\"SELECT CHILD from INDIVIDUAL where ID = ?\", (mother,))\r\n mother_family = mother_family_query.fetchall()\r\n mother_family = [i[0] for i in mother_family]\r\n\r\n if (mother_family[0] != 'NA'):\r\n\r\n for motherId in mother_family:\r\n siblingsOfMotherQuery = conn.execute(\"SELECT CHILDREN from FAMILY where ID = ?\",\r\n (motherId,))\r\n siblingsOfMother = siblingsOfMotherQuery.fetchall()\r\n siblingsOfMother = [i[0] for i in siblingsOfMother]\r\n siblingsOfMother = re.sub(r'[^@I0-9,]', '', siblingsOfMother[0])\r\n siblingsOfMother = siblingsOfMother.split(\",\")\r\n\r\n # print(siblingsOfMother)\r\n\r\n # Store all siblings of mother except mother herself.\r\n for mothersiblingId in siblingsOfMother:\r\n if (mothersiblingId != motherId):\r\n my_parents_siblings.append(mothersiblingId)\r\n # print(\"list \"+str(my_parents_siblings))\r\n for my_parents_siblingsId in my_parents_siblings:\r\n my_parents_siblings_spouseQuery = conn.execute(\r\n \"SELECT HUSBAND_ID,WIFE_ID from FAMILY where ID in (SELECT SPOUSE from INDIVIDUAL where ID = ?)\",\r\n (my_parents_siblingsId,))\r\n my_parents_siblings_spouse = my_parents_siblings_spouseQuery.fetchone()\r\n\r\n if (my_parents_siblings_spouse != None):\r\n my_parents_siblings_spouse = list(my_parents_siblings_spouse)\r\n # print(my_parents_siblings_spouse)\r\n husband = my_parents_siblings_spouse[0]\r\n wife = my_parents_siblings_spouse[1]\r\n\r\n partner = \"\"\r\n if (husband != my_parents_siblingsId):\r\n partner = husband\r\n\r\n else:\r\n partner = wife\r\n # print(partner)\r\n if partner in my_parents_siblings:\r\n if partner in alreadySearched and my_parents_siblingsId in alreadySearched:\r\n flag = 1\r\n return flag, 1\r\n else:\r\n flag = 0\r\n alreadySearched.append(partner)\r\n alreadySearched.append(my_parents_siblingsId)\r\n return alreadySearched, flag, 0\r\n\r\n 
chk, f, x = us20()\r\n    if (f == 0 and x == 0):\r\n        invalidIds = \",\".join(chk)\r\n        #for id in chk:\r\n        #    invalidIds += \",\" + id\r\n        print(\"Error : US20 IDs-\" + invalidIds + \" are aunts and uncles and should not be married\")\r\n\r\n\r\ndef US30():\r\n    livingMarried = []\r\n    result = conn.execute(\"SELECT ID,ALIVE,SPOUSE from INDIVIDUAL\")\r\n    data = result.fetchall()\r\n\r\n    for indi in data:\r\n        indi_id = indi[0]\r\n        alive = indi[1]\r\n        spouse = indi[2]\r\n\r\n        # print(indi_id+\" \"+alive+\" \"+spouse)\r\n\r\n        if (spouse != 'NA' and alive == 'True'):\r\n            livingMarried.append(indi_id)\r\n\r\n    names = \"\"  # renamed so the builtin str() is not shadowed\r\n    for id in livingMarried:\r\n        names += id + \" \"\r\n\r\n    print(\"US30 : List of living married is : \" + names)\r\n\r\n\r\n#Rishi\r\n\r\ndef US31():\r\n    Result_query = \"select ID,name,age from INDIVIDUAL where spouse='NA' and age>30 and death='NA'\"\r\n    Final_result = conn.execute(Result_query)\r\n    Final_value = Final_result.fetchall()\r\n    for Each_row in Final_value:\r\n        print(\"ERROR: US31: \" + Each_row[1].replace(\"/\", \" \") + \" is single and alive and has an age of\", Each_row[2], \" which is more than 30\")\r\n\r\n\r\ndef US23():\r\n    query = \"select distinct I1.name,I1.birthday from INDIVIDUAL as I1,INDIVIDUAL as I2 where I1.ID!=I2.ID and I1.name=I2.name and I1.birthday=I2.birthday\"\r\n    result = conn.execute(query)\r\n    value = result.fetchall()\r\n\r\n    for each_row in value:\r\n        print(\"ERROR: US23:\", each_row[0].replace(\"/\", \" \"), \"has birthday on\", each_row[1], \" and appears more than once\")\r\n\r\n\r\n\r\n#Shreyas Sule\r\ndef US19():\r\n    result = conn.execute(\"SELECT ID,CHILDREN from FAMILY\")\r\n    data = result.fetchall()\r\n    chk = []\r\n\r\n    def formatChildrenData(siblings):\r\n        punctuation = [\"{\", \"}\", \",\", \"(\", \")\"]\r\n        for characters in punctuation:\r\n            siblings = siblings.replace(characters, \" \").strip()\r\n        childrenData = siblings.split(\" \")\r\n        return childrenData\r\n\r\n    def us19FirstCousins():\r\n\r\n        for familyData in data:\r\n            famId = familyData[0]\r\n            children = familyData[1]\r\n\r\n            siblings = formatChildrenData(children)\r\n            # print(siblings)\r\n            if (len(siblings) >= 1):\r\n                for indi_id in siblings:\r\n                    flag = 0\r\n                    # Get parents of current child\r\n                    parent_query = conn.execute(\r\n                        \"SELECT HUSBAND_ID,WIFE_ID from FAMILY where ID in (SELECT CHILD from INDIVIDUAL where ID = ?)\",\r\n                        (indi_id,))\r\n                    parents = parent_query.fetchone()\r\n                    # print(parents)\r\n                    # parents = list(parents)\r\n                    # print(parents)\r\n                    cousins = []\r\n                    parents_siblings = []\r\n                    if (parents != None):\r\n                        father = parents[0]\r\n                        # print(father)\r\n                        mother = parents[1]\r\n\r\n                        father_family_query = conn.execute(\"SELECT CHILD from INDIVIDUAL where ID = ?\", (father,))\r\n                        father_family = father_family_query.fetchall()\r\n                        father_family = [i[0] for i in father_family]\r\n                        # print(father_family)\r\n\r\n                        if (father_family[0] != 'NA'):\r\n\r\n                            for fatherId in father_family:\r\n                                siblingsOfFatherQuery = conn.execute(\"SELECT CHILDREN from FAMILY where ID = ?\",\r\n                                                                     (fatherId,))\r\n                                siblingsOfFather = siblingsOfFatherQuery.fetchall()\r\n                                siblingsOfFather = [i[0] for i in siblingsOfFather]\r\n                                siblingsOfFather = re.sub(r'[^@I0-9,]', '', siblingsOfFather[0])\r\n                                siblingsOfFather = siblingsOfFather.split(\",\")\r\n                                # print(siblingsOfFather)\r\n                                # Store all siblings of father except father himself.\r\n                                for siblingId in siblingsOfFather:\r\n                                    if (siblingId != fatherId):\r\n                                        parents_siblings.append(siblingId)\r\n                        # print(parents_siblings)\r\n\r\n\r\n                        mother_family_query = 
conn.execute(\"SELECT CHILD from INDIVIDUAL where ID = ?\", (mother,))\r\n mother_family = mother_family_query.fetchall()\r\n mother_family = [i[0] for i in mother_family]\r\n\r\n if (mother_family[0] != 'NA'):\r\n\r\n for motherId in mother_family:\r\n siblingsOfMotherQuery = conn.execute(\"SELECT CHILDREN from FAMILY where ID = ?\",\r\n (motherId,))\r\n siblingsOfMother = siblingsOfMotherQuery.fetchall()\r\n siblingsOfMother = [i[0] for i in siblingsOfMother]\r\n siblingsOfMother = re.sub(r'[^@I0-9,]', '', siblingsOfMother[0])\r\n siblingsOfMother = siblingsOfMother.split(\",\")\r\n\r\n # print(siblingsOfMother)\r\n\r\n # Store all siblings of father except father himself.\r\n for siblingId in siblingsOfMother:\r\n if (siblingId != motherId):\r\n parents_siblings.append(siblingId)\r\n\r\n # print(parent_siblings+\" all siblings \")\r\n\r\n # print(parents_siblings)\r\n\r\n for parentSiblingIds in parents_siblings:\r\n\r\n \"\"\"\r\n parentSiblingIds = list(parentSiblingIds)\r\n parentSiblingIds = re.sub(r'[^@I0-9,]','',parentSiblingIds[0])\r\n parentSiblingIds = parentSiblingIds.split(',')\r\n \"\"\"\r\n # print(parentSiblingIds)\r\n\r\n\r\n cousinFamilyQuery = conn.execute(\"SELECT SPOUSE from INDIVIDUAL where ID = ?\",\r\n (parentSiblingIds,))\r\n\r\n cousinFamilyId = cousinFamilyQuery.fetchall()\r\n cousinFamilyId = [i[0] for i in cousinFamilyId]\r\n # print(cousinFamilyId)\r\n\r\n for cId in cousinFamilyId:\r\n\r\n if (cousinFamilyId != None):\r\n\r\n cousinsQuery = conn.execute(\"SELECT CHILDREN from FAMILY where ID = ?\", (cId,))\r\n cousinData = cousinsQuery.fetchone()\r\n if (cousinData != None):\r\n\r\n cousinData = list(cousinData)\r\n cousinData = re.sub(r'[^@I0-9,]', '', cousinData[0])\r\n cousinData = cousinData.split(',')\r\n for i in cousinData:\r\n cousins.append(i)\r\n # print(cousins)\r\n\r\n for id in cousins:\r\n mySpouseQuery = conn.execute(\r\n \"SELECT HUSBAND_ID,WIFE_ID from FAMILY where ID in (SELECT SPOUSE from INDIVIDUAL where ID = ?)\",\r\n (id,))\r\n mySpouse = mySpouseQuery.fetchone()\r\n if (mySpouse != None):\r\n mySpouse = list(mySpouse)\r\n husband = mySpouse[0]\r\n wife = mySpouse[1]\r\n # print(husband,wife)\r\n if husband in cousins and wife in cousins:\r\n if husband in chk and wife in chk:\r\n flag = 1\r\n return flag,1\r\n #h = husband\r\n #w = wife\r\n #flag = 1\r\n else:\r\n flag = 0\r\n chk.append(husband)\r\n chk.append(wife)\r\n return chk,flag,0\r\n #if (flag == 1):\r\n # flag1 = 1\r\n #print(\"ERROR: US19: ID-\", h, \" and \", w, \"are first cousins and married\")\r\n\r\n chk1,f,x = us19FirstCousins()\r\n if(f==0 and x==0):\r\n ids1 = \",\".join(chk1)\r\n print(\"ERROR: US19: IDs-\",ids1,\" are First cousins and married!\")\r\n\r\n\r\n# Pranit Kulkarni\r\ndef US16():\r\n query = \"SELECT ID,HUSBAND_NAME,CHILDREN FROM FAMILY\"\r\n\r\n result = conn.execute(query)\r\n rows = result.fetchall()\r\n\r\n for row in rows:\r\n family_id = row[0]\r\n last_name = (row[1].replace(\"/\", \"\").split(\" \"))[1]\r\n # print(last_name)\r\n\r\n isLastNameSame = True\r\n children = row[2]\r\n children = children.replace(\"{\", \"\")\r\n children = children.replace(\"}\", \"\")\r\n\r\n childIds = children.split(\",\")\r\n for childId in childIds:\r\n\r\n result1 = conn.execute(\"SELECT NAME FROM INDIVIDUAL WHERE ID = ? 
and GENDER = ?\", (childId, \"M\"))\r\n db_rows = result1.fetchall()\r\n\r\n for son_name in db_rows:\r\n names = son_name[0].replace(\"/\", \"\").split(\" \")\r\n son_last_name = names[1]\r\n\r\n if (last_name != son_last_name):\r\n isLastNameSame = False\r\n break\r\n\r\n if (isLastNameSame == False):\r\n print(\r\n \"ERROR: \" + \" US16: \" + names[0] + \" \" + names[1] + \" does not have the same family name \" + last_name)\r\n\r\n\r\n# Pranit Kulkarni\r\ndef US21():\r\n query1 = \"SELECT NAME, GENDER FROM INDIVIDUAL WHERE ID IN (SELECT HUSBAND_ID FROM FAMILY)\"\r\n result1 = conn.execute(query1)\r\n\r\n husbands = result1.fetchall()\r\n\r\n for row in husbands:\r\n if row[1] != \"M\":\r\n name = row[0].replace(\"/\", \"\")\r\n print(\"ERROR: \" + \" US21: \" + \"Husband \" + name + \" is not Male\")\r\n\r\n query2 = \"SELECT NAME, GENDER FROM INDIVIDUAL WHERE ID IN (SELECT WIFE_ID FROM FAMILY)\"\r\n\r\n result2 = conn.execute(query2)\r\n\r\n wives = result2.fetchall()\r\n\r\n for row in wives:\r\n\r\n if row[1] != \"F\":\r\n name = row[0].replace(\"/\", \"\")\r\n print(\"ERROR: \" + \" US21: \" + \"Wife \" + name + \" is not female\")\r\n\r\n\r\ndef US13(): # SIBLING SPACING BY SHREYAS SULE\r\n sql = \"SELECT ID,CHILDREN from FAMILY\"\r\n\r\n result = conn.execute(sql)\r\n data = result.fetchall()\r\n\r\n punctuation = [\"{\", \"}\", \",\"]\r\n\r\n myData = {}\r\n\r\n\r\n for row in data:\r\n id = row[0]\r\n children = row[1]\r\n for c in punctuation:\r\n children = children.replace(c, \" \").strip()\r\n childrenData = children.split(\" \")\r\n myData[id] = childrenData\r\n\r\n for key, value in myData.items():\r\n flag = True\r\n siblingDates = []\r\n Invalid_siblings = []\r\n for childId in value:\r\n query = conn.execute(\"SELECT BIRTHDAY from INDIVIDUAL where ID = ?\", (childId,))\r\n rows = query.fetchall()\r\n for birthdate in rows:\r\n siblingDates.append(birthdate[0])\r\n\r\n temp1 = -1\r\n temp2 = -1\r\n for i in range(len(siblingDates)):\r\n for j in range(i + 1, len(siblingDates)):\r\n child1 = datetime.strptime(siblingDates[i], '%d %b %Y').date()\r\n yearOfChild1 = child1.year\r\n monthOfChild1 = child1.month\r\n dayOfChild1 = child1.day\r\n\r\n child2 = datetime.strptime(siblingDates[j], '%d %b %Y').date()\r\n yearOfChild2 = child2.year\r\n monthOfChild2 = child2.month\r\n dayOfChild2 = child2.day\r\n\r\n if ((yearOfChild2 - yearOfChild1) < 1):\r\n if ((monthOfChild2 - monthOfChild1) < 8 or (dayOfChild2 - dayOfChild1) < 2):\r\n flag = False\r\n temp1 = i\r\n temp2 = j\r\n if value[temp1] not in Invalid_siblings:\r\n Invalid_siblings.append(value[temp1])\r\n if value[temp2] not in Invalid_siblings:\r\n Invalid_siblings.append(value[temp2])\r\n break\r\n\r\n if (flag == False):\r\n invalid_id_str = \",\".join(Invalid_siblings)\r\n print(\"ERROR: US13: \" + invalid_id_str +\" from \" + key + \" have invalid spacing\")\r\n\r\n\r\ndef US22():\r\n '''\r\n error_tag = \"ERROR: \" + \" US22: \"\r\n query1 = \"SELECT ID from INDIVIDUAL\"\r\n query2 = \"SELECT ID from FAMILY\"\r\n\r\n result1 = conn.execute(query1)\r\n result2 = conn.execute(query2)\r\n\r\n all_INDI_IDs = result1.fetchall()\r\n all_FAM_IDs = result2.fetchall()\r\n\r\n INDI_ID = []\r\n FAM_ID = []\r\n\r\n for ID in all_INDI_IDs:\r\n INDI_ID.append(ID[0])\r\n\r\n for i in range(len(INDI_ID)):\r\n for j in range(i + 1, len(INDI_ID)):\r\n if INDI_ID[i] == INDI_ID[j]:\r\n print(error_tag + INDI_ID[i] + \" is not unique\")\r\n\r\n for ID in all_FAM_IDs:\r\n FAM_ID.append(ID[0])\r\n\r\n for i in range(len(FAM_ID)):\r\n 
for j in range(i + 1, len(FAM_ID)):\r\n            if FAM_ID[i] == FAM_ID[j]:\r\n                print(error_tag + FAM_ID[i] + \" is not unique\")\r\n###################### NOTE AHEAD #######################################\r\nAdding a duplicate ID to the GEDCOM file stops execution while the intermediate table is being created, so\r\nfor the time being we created an erroneous GEDCOM file and parsed it in order to notify the customer of\r\nthe duplicate ID case.\r\n'''\r\n    try:\r\n        GEDCOM_parser.parse('FamilyTree1.ged')\r\n    except Exception:\r\n        print(\"ERROR: US22: Duplicate IDs found in the GEDCOM\")\r\n\r\ndef US25():\r\n    error_tag = \"ERROR: \" + \" US25: \"\r\n    flag = 0\r\n    c = []\r\n    fam_id = []\r\n    children_id = []\r\n    name = []\r\n    bday = []\r\n    # conn = sqlite3.connect('GEDCOM_DATA.db')\r\n    cur = conn.cursor()\r\n    cur.execute(\"SELECT ID, CHILDREN FROM FAMILY\")\r\n    rows = cur.fetchall()\r\n\r\n    for fid, chid in rows:\r\n        fam_id.append(re.sub(r'[^@,0-9a-zA-Z ]+', '', str(fid)))\r\n        children_id.append(re.sub(r'[^@,0-9a-zA-Z ]+', '', str(chid)))\r\n\r\n    for i, id in enumerate(fam_id):\r\n        c = children_id[i].split(',')\r\n        for j in c:\r\n            if j == 'NA':\r\n                flag = 1\r\n            else:\r\n                cur.execute(\"SELECT NAME, BIRTHDAY FROM INDIVIDUAL WHERE ID=?\", (j,))\r\n                irows = cur.fetchall()\r\n                for nm, bd in irows:\r\n                    name.append(re.sub(r'[^0-9a-zA-Z ]+', '', str(nm)))\r\n                    bday.append(re.sub(r'[^0-9a-zA-Z ]+', '', str(bd)))\r\n        if flag == 1:\r\n            print(error_tag + \"The family id \" + id + \" has no child\")\r\n        else:\r\n            if len(name) != len(set(name)) and len(bday) != len(set(bday)):\r\n                print(error_tag + \"The children of family id \" + id + \" do not have unique first names!\")\r\n    # else:\r\n    #     print(error_tag+\"The children of family id \"+ id+ \" have unique first names!\")\r\n\r\n\r\ndef US29():\r\n    dead = []\r\n    # conn = sqlite3.connect('GEDCOM_DATA.db')\r\n    cur = conn.cursor()\r\n    cur.execute(\"SELECT NAME FROM INDIVIDUAL WHERE ALIVE='False'\")\r\n    rows = cur.fetchall()\r\n    for row in rows:\r\n        dead.append(re.sub(r'[^0-9a-zA-Z ]+', '', str(row)))\r\n    # print(\"=======================LIST OF DECEASED============================\")\r\n    for name in dead:\r\n        print(\"US29: \" + name + \" is dead\")\r\n        # print(name)\r\n\r\n\r\ndef US03():\r\n    query1 = \"select NAME,AGE from INDIVIDUAL\"\r\n\r\n    result1 = conn.execute(query1)\r\n\r\n    value = result1.fetchall()\r\n    first = value[0]\r\n\r\n    for row in value:\r\n        if (row[1] <= 0):\r\n            print(\"ERROR: US03: \" + row[0].replace(\"/\", \" \") + \" has an invalid age since birthday is after death day\")\r\n\r\n\r\ndef US02():\r\n    query1 = \"select NAME,BIRTHDAY,MARRIED from INDIVIDUAL AS I,FAMILY AS F where I.ID=F.HUSBAND_ID OR I.ID=F.WIFE_ID\"\r\n\r\n    result1 = conn.execute(query1)\r\n\r\n    value = result1.fetchall()\r\n    first = value[0]\r\n\r\n    i = 0\r\n    a = ''\r\n    b = ''\r\n    for row in value:\r\n        first_row = value[i]\r\n        if(first_row[1]!='NA' and first_row[2]!='NA'):\r\n            a = datetime.strptime(first_row[1], '%d %b %Y')\r\n            a.strftime('%d %m %Y')\r\n\r\n            b = datetime.strptime(first_row[2], '%d %b %Y')\r\n            b.strftime('%d %m %Y')\r\n        i += 1\r\n        # print (first_row)\r\n        if (a > b):\r\n            print(\"ERROR: US02: \" + first_row[0].replace(\"/\",\r\n                                                          \" \") + \" is born after their own marriage which is not possible \")\r\n\r\n\r\ndef US11():\r\n    HID = []\r\n    WID = []\r\n    HName = []\r\n    WName = []\r\n    div_date = []\r\n    marr_date = []\r\n    SP_ID = []\r\n    check_list = []\r\n    #conn = sqlite3.connect('DATA.db')\r\n    cur = conn.cursor()\r\n    cur.execute(\"SELECT SPOUSE FROM INDIVIDUAL WHERE 
LENGTH(SPOUSE)>5\")\r\n rows = cur.fetchall()\r\n for spid in rows:\r\n SP_ID = list(spid)\r\n SP_ID = SP_ID[0].split(',')\r\n for spid in SP_ID:\r\n cur.execute(\"SELECT MARRIED,DIVORCED,HUSBAND_ID,HUSBAND_NAME,WIFE_ID,WIFE_NAME FROM FAMILY WHERE ID=?\", (spid,))\r\n rows1 = cur.fetchall()\r\n for m, d, hid, hnm, wid, wnm in rows1:\r\n marr_date.append(re.sub(r'[^0-9a-zA-Z ]', '', str(m)))\r\n div_date.append(re.sub(r'[^0-9a-zA-Z ]', '', str(d)))\r\n HID.append(re.sub(r'[^@0-9a-zA-Z ]', '', str(hid)))\r\n HName.append(re.sub(r'[^0-9a-zA-Z ]', '', str(hnm)))\r\n WID.append(re.sub(r'[^@0-9a-zA-Z]', '', str(wid)))\r\n WName.append(re.sub(r'[^0-9a-zA-Z ]', '', str(wnm)))\r\n if len(HID) != len(set(HID)):\r\n for p, id in enumerate(set(HID)):\r\n div = div_date[p]\r\n if div != \"NA\":\r\n # do something when there is divorce date for the individual\r\n print(div)\r\n mdt1 = datetime.strptime(str(marr_date[p]), \"%d %b %Y\")\r\n mdt1 = datetime.date(mdt1)\r\n mdt2 = datetime.strptime(str(marr_date[p + 1]), \"%d %b %Y\")\r\n mdt2 = datetime.date(mdt2)\r\n div = datetime.strptime(str(div), \"%d %b %Y\")\r\n div = datetime.date(div)\r\n if mdt1 < mdt2:\r\n end = mdt2\r\n start = mdt1\r\n else:\r\n end = mdt1\r\n start = mdt2\r\n if start < div < end:\r\n continue\r\n else:\r\n check_list.append(id)\r\n print(\"ERROR: US11: ID-\", id, \"is in a bigamous relationship!\")\r\n else:\r\n mdt1 = datetime.strptime(str(marr_date[p]), \"%d %b %Y\")\r\n mdt1 = datetime.date(mdt1)\r\n mdt2 = datetime.strptime(str(marr_date[p + 1]), \"%d %b %Y\")\r\n mdt2 = datetime.date(mdt2)\r\n if mdt1 < mdt2:\r\n pos = p\r\n end = mdt2\r\n start = mdt1\r\n else:\r\n pos = p + 1\r\n end = mdt1\r\n start = mdt2\r\n cur.execute(\"SELECT DEATH FROM INDIVIDUAL WHERE ID=?\", (WID[pos],))\r\n rr = cur.fetchall()\r\n for d in rr:\r\n d = re.sub(r'[^0-9a-zA-Z ]', '', str(d))\r\n if d != \"NA\":\r\n d = datetime.strptime(str(d), \"%d %b %Y\")\r\n d = datetime.date(d)\r\n if start < d < end:\r\n continue\r\n else:\r\n check_list.append(id)\r\n print(\"ERROR: US11: ID-\", id, \"has bigamous relationship!\")\r\n else:\r\n print(\"ERROR: Insufficient data - The individual\", id,\r\n \" may or may not have a bigamous relationship\")\r\n if len(WID) != len(set(WID)):\r\n for p, id in enumerate(set(WID)):\r\n div = div_date[p]\r\n if div != \"NA\":\r\n print(div)\r\n # do something when there is divorce date for the individual\r\n mdt1 = datetime.strptime(str(marr_date[p]), \"%d %b %Y\")\r\n mdt1 = datetime.date(mdt1)\r\n mdt2 = datetime.strptime(str(marr_date[p + 1]), \"%d %b %Y\")\r\n mdt2 = datetime.date(mdt2)\r\n div = datetime.strptime(str(div), \"%d %b %Y\")\r\n div = datetime.date(div)\r\n if mdt1 < mdt2:\r\n end = mdt2\r\n start = mdt1\r\n else:\r\n end = mdt1\r\n start = mdt2\r\n if start < div < end:\r\n continue\r\n else:\r\n check_list.append(id)\r\n print(\"ERROR: US11: ID-\", id, \"is in a bigamous relationship!\")\r\n else:\r\n mdt1 = datetime.strptime(str(marr_date[p]), \"%d %b %Y\")\r\n mdt1 = datetime.date(mdt1)\r\n mdt2 = datetime.strptime(str(marr_date[p + 1]), \"%d %b %Y\")\r\n mdt2 = datetime.date(mdt2)\r\n if mdt1 < mdt2:\r\n pos = p\r\n end = mdt2\r\n start = mdt1\r\n else:\r\n pos = p + 1\r\n end = mdt1\r\n start = mdt2\r\n cur.execute(\"SELECT DEATH FROM INDIVIDUAL WHERE ID=?\", (HID[pos],))\r\n rr = cur.fetchall()\r\n for d in rr:\r\n d = re.sub(r'[^0-9a-zA-Z ]', '', str(d))\r\n if d != \"NA\":\r\n d = datetime.strptime(str(d), \"%d %b %Y\")\r\n d = datetime.date(d)\r\n if start < d < end:\r\n continue\r\n 
print(\"No individual is in a bigamous relationship!\")\r\n else:\r\n check_list.append(id)\r\n print(\"ERROR: US11: ID-\", id, \"has bigamous relationship!\")\r\n else:\r\n print(\"ERROR: Insufficient data - The individual\", id,\r\n \"may or may not have a bigamous relationship\")\r\n\r\n\r\ndef US10():\r\n def age_at_marriage(d1, d2):\r\n if d1=='NA' or d2=='NA':\r\n return -1\r\n else:\r\n age = 0\r\n d1 = datetime.strptime(d1, \"%d %b %Y\")\r\n d2 = datetime.strptime(d2, \"%d %b %Y\")\r\n age = d2.year - d1.year - ((d2.month, d2.day) < (d1.month, d1.day))\r\n return age\r\n husb_id =[]\r\n husb_bd = []\r\n wife_id = []\r\n wife_bd = []\r\n marr_date = []\r\n under_age = []\r\n #conn = sqlite3.connect('GEDCOM_DATA14-2.db')\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT HUSBAND_ID, WIFE_ID, MARRIED FROM FAMILY\")\r\n rows = cur.fetchall()\r\n for husbid,wifeid,mardt in rows:\r\n husb_id.append(re.sub(r'[^@,0-9a-zA-Z ]+', '', str(husbid)))\r\n wife_id.append(re.sub(r'[^@,0-9a-zA-Z ]+', '', str(wifeid)))\r\n marr_date.append(re.sub(r'[^@,0-9a-zA-Z ]+', '', str(mardt)))\r\n for hid in husb_id:\r\n cur.execute(\"SELECT BIRTHDAY FROM INDIVIDUAL WHERE ID=?\",(hid,))\r\n rows1 = cur.fetchall()\r\n husb_bd.append(re.sub(r'[^0-9a-zA-Z ]+', '', str(rows1)))\r\n for wid in wife_id:\r\n cur.execute(\"SELECT BIRTHDAY FROM INDIVIDUAL WHERE ID=?\",(wid,))\r\n rows2 = cur.fetchall()\r\n wife_bd.append(re.sub(r'[^0-9a-zA-Z ]+', '', str(rows2)))\r\n for hid,hmdt,hbdt in zip(husb_id,marr_date,husb_bd):\r\n age = age_at_marriage(hbdt,hmdt)\r\n if age < 14 and age>=0:\r\n under_age.append(hid)\r\n print(\"ERROR: US10: \",hid,\" is under 14 years of age when he was married\")\r\n for wid,wmdt,wbdt in zip(wife_id,marr_date,wife_bd):\r\n age = age_at_marriage(wbdt,wmdt)\r\n if age < 14 and age>=0:\r\n under_age.append(wid)\r\n print(\"ERROR: US10: \",wid,\" is under 14 years of age when she was married\")\r\n\r\ndef US07():\r\n query = \"SELECT NAME,ALIVE,AGE,BIRTHDAY,DEATH FROM INDIVIDUAL\"\r\n\r\n error_tag = \"ERROR: US07: \"\r\n result = conn.execute(query)\r\n rows = result.fetchall()\r\n\r\n date_format = \"%d %b %Y\"\r\n today = datetime.today().date()\r\n\r\n for row in rows:\r\n name = row[0].replace(\"/\", \"\")\r\n\r\n if row[1] == \"False\":\r\n age = row[2]\r\n\r\n if (age > 150):\r\n print(error_tag + name + \" who is dead does not have valid age\")\r\n\r\n elif row[1] == \"True\":\r\n birthdateText = row[3]\r\n\r\n birthdate = datetime.strptime(birthdateText, date_format).date()\r\n difference = today - birthdate\r\n\r\n years = difference.days / 365\r\n\r\n if years >= 150:\r\n print(error_tag + name + \" who is alive has birthdate less than 150 years from now\")\r\n\r\n\r\ndef US36():\r\n query = \"SELECT NAME,ALIVE,DEATH FROM INDIVIDUAL\"\r\n\r\n result = conn.execute(query)\r\n rows = result.fetchall()\r\n\r\n date_format = \"%d %b %Y\"\r\n today = datetime.today().date()\r\n\r\n recent_deaths = []\r\n\r\n for row in rows:\r\n if row[1] == \"False\": # If the person is dead..\r\n\r\n death_date = datetime.strptime(row[2], date_format).date()\r\n difference = today - death_date\r\n\r\n if difference.days <= 30:\r\n recent_deaths.append(row[0].replace(\"/\", \"\")) # print Name\r\n\r\n if len(recent_deaths) > 0:\r\n for person in recent_deaths:\r\n print(\"ERROR: US36: \" + person + \" died in the last 30 days\")\r\n\r\n def deadRecently(name):\r\n\r\n if name in recent_deaths:\r\n return True\r\n\r\n return False\r\n\r\ndef US01():\r\n current = datetime.now().date()\r\n query1 = \"select 
NAME,BIRTHDAY,DEATH from INDIVIDUAL\"\r\n    result1 = conn.execute(query1)\r\n    value1 = result1.fetchall()\r\n    i = 0\r\n    date_format = \"%d %b %Y\"\r\n    c = 'NA'  # initialize so the check below cannot hit an undefined name\r\n    for row in value1:\r\n        first_row = value1[i]\r\n\r\n        a = datetime.strptime(first_row[1], '%d %b %Y').date()\r\n        if (first_row[2] == 'NA'):\r\n            pass\r\n        else:\r\n            c = datetime.strptime(first_row[2], '%d %b %Y').date()\r\n            # c.strftime('%d %m %Y')\r\n            # print c\r\n        if (c == 'NA'):\r\n            pass\r\n        elif (c != 'NA' and (current < a or current < c)):\r\n\r\n            print('ERROR: US01: ' + first_row[1].replace(\"/\", \" \") + ' is after the current date ')\r\n\r\n        i += 1\r\n\r\n    query2 = \"select HUSBAND_NAME,WIFE_NAME,MARRIED,DIVORCED from FAMILY\"\r\n    result2 = conn.execute(query2)\r\n    value2 = result2.fetchall()\r\n    j = 0\r\n    e = 'NA'\r\n    for row in value2:\r\n        first_row = value2[j]\r\n        #d=datetime.strptime(first_row[2],'%d %b %Y').date()\r\n        # print a\r\n        if (first_row[3] == 'NA'):\r\n            pass\r\n        else:\r\n            e = datetime.strptime(first_row[3], '%d %b %Y').date()\r\n\r\n        if (e == 'NA'):\r\n            pass\r\n        elif (current < e):\r\n            print(\"ERROR: US01: \", first_row[2].replace(\"/\", \" \"), \" is after the current date \")\r\n\r\n        j += 1\r\n\r\ndef US12():\r\n    query = \"SELECT I1.BIRTHDAY AS HUSBAND_BIRTHDAY, I2.BIRTHDAY AS WIFE_BIRTHDAY, I3.BIRTHDAY as CHILD_BIRTHDAY,F1.HUSBAND_NAME,F2.WIFE_NAME FROM INDIVIDUAL I1 INNER JOIN INDIVIDUAL I2 ON I1.ID <> I2.ID INNER JOIN INDIVIDUAL I3 ON I1.ID<>I3.ID AND I2.ID<> I3.ID INNER JOIN FAMILY F1 ON F1.HUSBAND_ID = I1.ID INNER JOIN FAMILY F2 ON F2.WIFE_ID=I2.ID INNER JOIN FAMILY F3 ON I3.CHILD=F3.ID WHERE F1.ID=F2.ID AND F3.ID=F1.ID\"\r\n    result1 = conn.execute(query)\r\n    value1 = result1.fetchall()\r\n    i = 0\r\n    date_format = \"%d %b %Y\"\r\n    for row in value1:\r\n        first_row = value1[i]\r\n        husband_birthday = datetime.strptime(first_row[0], date_format).date()\r\n\r\n        wife_birthday = datetime.strptime(first_row[1], date_format).date()\r\n\r\n        child_birthday = datetime.strptime(first_row[2], date_format).date()\r\n\r\n        husband_child_diff = child_birthday - husband_birthday  # child minus father, so the gap is positive\r\n\r\n        husband_child_yearsdiff = husband_child_diff.days / 365\r\n\r\n        wife_child_diff = child_birthday - wife_birthday  # child minus mother\r\n\r\n        wife_child_yearsdiff = wife_child_diff.days / 365\r\n        if ((husband_child_yearsdiff > 80) and (wife_child_yearsdiff > 60)):\r\n            print(\"ERROR: US12: \" + first_row[3].replace(\"/\", \" \") + \" or \" + first_row[4].replace(\"/\",\r\n                                                                                                    \" \") + \" have a child either more than 60 years younger than mother or more than 80 years younger than father\")\r\n\r\n        i += 1\r\n\r\ndef US14():\r\n    query = \"SELECT ID,CHILDREN from FAMILY\"\r\n\r\n    result = conn.execute(query)\r\n    data = result.fetchall()\r\n\r\n    def formatChildrenData(siblings):\r\n        punctuation = [\"{\", \"}\", \",\"]\r\n        for characters in punctuation:\r\n            siblings = siblings.replace(characters, \" \").strip()\r\n        childrenData = siblings.split(\" \")\r\n        return childrenData\r\n    def US14mulsib():\r\n\r\n        for familyData in data:\r\n            famId = familyData[0]\r\n            children = familyData[1]\r\n\r\n            siblings = formatChildrenData(children)\r\n\r\n            noOfSiblings = len(siblings)\r\n            #print(noOfSiblings)\r\n            if (noOfSiblings >= 5):\r\n                #print(\".\")\r\n                famObj = Family.Family()\r\n                famObj.setFamId(famId)\r\n\r\n                for indi_id in siblings:\r\n                    birth_date_query = conn.execute(\"SELECT BIRTHDAY from INDIVIDUAL where ID = ?\", (indi_id,))\r\n                    birthdates = birth_date_query.fetchall()\r\n                    for dates in birthdates:\r\n                        #print(dates[0])\r\n                        myBirthDate = datetime.strptime(dates[0], '%d %b %Y').date()\r\n                        
famObj.setBirthdate(myBirthDate)\r\n\r\n                val,checkSiblings = famObj.validateNoOfSiblings()\r\n                #print(checkSiblings)\r\n                if (checkSiblings):\r\n                    print(\"ERROR: US14: \", famObj.getFamId() + \" has \" + str(noOfSiblings) + \" siblings \" + \"from which \"+ str(val)+ \" have the same birthday\")\r\n    US14mulsib()\r\n\r\n# User stories by Shreyas\r\nUS22()\r\nUS13()\r\nUS14()\r\nUS19()\r\nUS30()\r\nUS20()\r\n\r\n# User stories by Pranit\r\nUS16()\r\nUS21()\r\nUS07()\r\nUS36()\r\nUS09()\r\nUS18()\r\n\r\n# User stories by Aakanksha\r\nUS25()\r\nUS29()\r\nUS10()\r\nUS11()\r\nUS06()\r\nUS28()\r\n\r\n# User stories by Rishi\r\nUS03()\r\nUS02()\r\nUS01()\r\nUS12()\r\nUS23()\r\nUS31()\r\nconn.close()","sub_path":"Sprint 3/Sprint3.py","file_name":"Sprint3.py","file_ext":"py","file_size_in_byte":45958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"16498461","text":"from django.db import models\nfrom django.db.models.signals import pre_save\nfrom django.utils import timezone\nfrom django.utils.text import slugify\nfrom videos.models import Video\nfrom djangoflix.db.receivers import publish_state_pre_save, slugify_pre_save\n\n\nclass PublishStateOptions(models.TextChoices):\n    PUBLISH = 'PU','Published'\n    DRAFT = 'DR','Draft'\n    UNLISTED = 'UN','Unlisted'\n    PRIVATE = 'PR','Private'\n\nclass PlaylistQuerySet(models.QuerySet):\n    def published(self):\n        now = timezone.now()\n        return self.filter(\n            state=PublishStateOptions.PUBLISH,\n            publish_timestamp__lte= now \n        )\n\nclass PlaylistManager(models.Manager):\n    def get_queryset(self):\n        return PlaylistQuerySet(self.model, using=self._db)\n\n    def published(self):\n        return self.get_queryset().published()\n\nclass Playlist(models.Model):\n    PlaylistStateOptions = PublishStateOptions\n    video = models.ForeignKey(Video, null=True, on_delete=models.SET_NULL)\n    title = models.CharField(max_length=220)\n    description = models.TextField(blank=True, null=True)\n    slug = models.SlugField(blank=True, null=True)\n    active = models.BooleanField(default=True)\n    state = models.CharField(max_length=4, choices=PlaylistStateOptions.choices, default=PlaylistStateOptions.DRAFT)\n    # auto_now_add would overwrite the value set by the publish_state_pre_save receiver\n    publish_timestamp = models.DateTimeField(auto_now_add=False, auto_now=False, blank=True, null=True)\n    timestamp = models.DateTimeField(auto_now_add=True)\n    updated = models.DateTimeField(auto_now=True)\n\n    objects = PlaylistManager()\n\n    @property\n    def is_published(self):\n        return self.active\n    \n\n\n\n\npre_save.connect(publish_state_pre_save, sender=Playlist)\npre_save.connect(slugify_pre_save, sender=Playlist)","sub_path":"src/playlists/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"635080987","text":"# this function hands back the computed value using the return keyword\ndef kwadrat(liczba):\n    wynik = liczba**2\n    print(wynik)\n    return wynik\n\n# to use the returned value\n# it has to be assigned to a variable\nwynik = kwadrat(2)\n\nprint(\"Wynik:\", wynik)\n\n\n","sub_path":"code/Day_6/funkcje_4.py","file_name":"funkcje_4.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"341378179","text":"\"\"\"\nFrom: https://oj.leetcode.com/problems/merge-k-sorted-lists/\nAuthor: Jing Zhou\nDate: Sep 10, 2014\nThought: use a priority queue. 
which is heapq in Python\nTags: linked list, sort, merge\n\"\"\"\n\nimport heapq\n\n\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    # @param a list of ListNode\n    # @return a ListNode\n    def mergeKLists(self, lists):\n        h = []\n        res = None\n        head = None\n        for node in lists:\n            if node:\n                heapq.heappush(h, (node.val, node))\n        while h:\n            v, smallest = heapq.heappop(h)\n            if not res:\n                res = smallest\n                head = res\n            else:\n                res.next = smallest\n                res = res.next\n            nextInList = smallest.next\n            if nextInList:\n                heapq.heappush(h, (nextInList.val, nextInList))\n        return head\n","sub_path":"week24/Jing/p_merge_k_sorted_list.py","file_name":"p_merge_k_sorted_list.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"617817796","text":"\ndef solution(X, A):\n    leafSet = set()\n    for index in range(len(A)):\n        if A[index] <= X:\n            leafSet.add(A[index])\n        if len(leafSet) == X:\n            return index\n    return -1\n\nif __name__ == \"__main__\":\n    X = 5\n    A = [1, 3, 1, 4, 2, 3, 5, 4]\n    print(solution(X, A))","sub_path":"Online Judge/Codility_Python3/frogRiverOne.py","file_name":"frogRiverOne.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"572734746","text":"\"\"\"\n\nOptimization for the Green Ball\n\nObserved Depth: depth image\nObserved Ir: infrared image\n\nDepth Start: Gaussian filtered depth image\nMaterial Start: From the material estimation step\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nimport optimize\nimport utils\nfrom scipy.ndimage.filters import gaussian_filter\n\n# Makes experiments repeatable with the same noise\n# (If I call the script again I get the same results again)\nnp.random.seed(0)\n\ndepth_sensor_image = Image.open('../assets/optimization/greenball/depth.tiff')\ndepth_sensor_image = np.asarray(depth_sensor_image, dtype=np.float32) * 4.5\n\n# Small trick to lift the middle depth artifact from 0 just a little bit\ndepth_sensor_image[178:220, 236:276][np.where(depth_sensor_image[178:220, 236:276] == 0)] = .01\n\nir_sensor_image = Image.open('../assets/optimization/greenball/ir.tiff')\nir_sensor_image = np.asarray(ir_sensor_image, dtype=np.float32)\n\nk_d = Image.open('../assets/optimization/greenball/kd.tiff')\nk_d = np.asarray(k_d, dtype=np.float32)\n# k_d = 0.6 * np.ones((424, 512), dtype=np.float32)\n\n# Test with k_d = ir\n#k_d = np.clip(ir_sensor_image, 0.0, 0.4)\n\nk_s = Image.open('../assets/optimization/greenball/ks.tiff')\nk_s = np.asarray(k_s, dtype=np.float32)\n# k_s = 0.1 * np.ones((424, 512), dtype=np.float32)\n\n#n = Image.open('../assets/optimization/greenball/n.tiff')\n#n = np.asarray(n, dtype=np.float32)\nn = 50 * np.ones((424, 512), dtype=np.float32)\n\nmargin = np.zeros((424, 512), dtype=np.float32)\nmaterial_image = np.dstack((k_d, k_s, n, margin))\n\n# Diffuse Optimization\noptimizer = optimize.Optimizer(depth_sensor_image, ir_sensor_image,\n                               lightingmodel='specular', normalmodel='pca',\n                               depth_variance=0.0001, ir_variance=0.0001,\n                               w_d=10, w_m=5.0,\n                               pca_radius=1.25,\n                               max_iterations=150)\n\ndepth_sensor_image_filtered = gaussian_filter(depth_sensor_image, 2)\noptimizer.optimize(depth_sensor_image_filtered, material_image)\n\noptimizer.material_image_opt_[:,:,1] = np.clip(optimizer.material_image_opt_[:,:,1], 0, 
1)\noptimizer.plot_results()\n\nutils.show_image(depth_sensor_image - optimizer.depth_image_opt_, 'depth difference opt')\n\nplt.show()\n\n\n","sub_path":"optimization/thesis_results_3.py","file_name":"thesis_results_3.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"597972739","text":"\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom . import views\r\n\r\n#urlpatterns = [\r\n # path('admin/', admin.site.urls),\r\n # path('',views.index,name='index'),\r\n # path('about',views.about,name='about'),\r\n#]\r\n\r\n#urlpatterns = [\r\n # path('admin/', admin.site.urls),\r\n # path('',views.index,name='index'),\r\n # path('removepunc',views.removepunc,name='rempunc'),\r\n #path('capitalizefirst',views.capfirst,name='capfirst'),\r\n #path('newlineremove',views.newlineremove,name='newlineremove'),\r\n #path('spaceremove',views.spaceremove,name='spaceremove'),\r\n #path('charcount',views.charcount,name='charcount'),\r\n#]\r\n\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n path('',views.index,name='index'),\r\n path('analyze',views.analyze,name='analyze'),\r\n path('aboutus',views.aboutus,name='aboutus'),\r\n path('contactus',views.contactus,name='contactus'),\r\n]\r\n\r\n\r\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"10214095","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nclass Mi_Ventana(Gtk.Window):\n\tdef __init__(self, *args, **kwargs):\n\t\t\n\t\tsuper(Mi_Ventana, self).__init__(*args, **kwargs)\n\t\tself.set_size_request(500, 300)\n\t\tself.connect('delete-event', Gtk.main_quit)\n\n\t\tself.agregar_contenedor()\n\t\tself.agregar_entrada()\n\t\tself.agregar_entrada2()\n\t\tself.agregar_boton()\n\t\tself.agregar_boton2()\n\t\tself.agregar_lista()\n\t\tself.agregar_lista2()\n\n\tdef agregar_contenedor(self):\n\t\tself.contenedor = Gtk.Grid()\n\t\tself.contenedor.set_column_homogeneous(True)\n\t\tself.add(self.contenedor)\n\n\tdef agregar_entrada(self):\n\t\tself.entrada = Gtk.Entry()\n\t\tself.entrada_monto = Gtk.Entry()\n\t\tself.contenedor.attach(self.entrada, 0, 0, 3, 1)\n\t\tself.contenedor.attach_next_to(\n\t\t\tself.entrada_monto,\n\t\t\tself.entrada,\n\t\t\tGtk.PositionType.RIGHT,\n\t\t\t1,\n\t\t\t1\t\n\n\t\t)\n\tdef agregar_entrada2(self):\n\t\tself.entrada2 = Gtk.Entry()\n\t\tself.entrada_monto2 = Gtk.Entry()\n\n\t\tself.contenedor.attach(self.entrada2, 0, 1, 3, 1)\n\t\tself.contenedor.attach_next_to(\n\t\t\tself.entrada_monto2,\n\t\t\tself.entrada2,\n\t\t\tGtk.PositionType.RIGHT,\n\t\t\t1,\n\t\t\t1\t\n\n\t\t)\n\n\t\t\n\n\n\tdef agregar_boton(self):\n\t\tself.boton = Gtk.Button('Activos')\n\t\tself.contenedor.attach_next_to(\n\t\t\tself.boton,\n\t\t\tself.entrada2,\n\t\t\tGtk.PositionType.BOTTOM,\n\t\t\t2,\n\t\t\t2\n\t\t)\n\tdef agregar_boton2(self):\n\t\tself.boton2 = Gtk.Button('Agregar Pasivos')\n\t\tself.contenedor.attach_next_to(\n\t\t\tself.boton2,\n\t\t\tself.entrada2,\n\t\t\tGtk.PositionType.BOTTOM,\n\t\t\t4,\n\t\t\t2\n\t\t)\n\t\tself.boton.connect('clicked', self.agregar_fila)\n\t\tself.boton2.connect('clicked', self.agregar_fila2)\n \n\tdef agregar_lista(self):\n\n\t self.modelo = Gtk.ListStore(str, float)\n\t #self.modelo.append(['Valor1', 1.5])\n\n\t self.lista_arvhivos = Gtk.TreeView(self.modelo)\n\n\t descripcion = Gtk.CellRendererText()\n\t 
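# ListStore columns: 0 = description (str), 1 = amount (float); the\r\n\t    # text= indices on the TreeViewColumns below follow this layout.\r\n\t    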
columna_descripcion = Gtk.TreeViewColumn(\r\n\t    \t'Descripcion', \r\n\t    \tdescripcion, \r\n\t    \ttext=0\r\n\t    )\r\n\r\n\t    monto = Gtk.CellRendererText()\r\n\t    columna_monto = Gtk.TreeViewColumn('Monto', monto, text=1)\r\n\r\n\t    self.lista_arvhivos.append_column(columna_descripcion)\r\n\t    self.lista_arvhivos.append_column(columna_monto)\r\n\r\n\t    self.contenedor.attach_next_to(\r\n\t    \tself.lista_arvhivos,\r\n\t    \tself.boton,\r\n\t    \tGtk.PositionType.BOTTOM,\r\n\t    \t2,\r\n\t    \t2\r\n\t    )\r\n\tdef agregar_lista2(self):\r\n\r\n\t    self.modelo2 = Gtk.ListStore(str, float)\r\n\t    #self.modelo.append(['Valor1', 1.5])\r\n\r\n\t    self.lista_arvhivos2 = Gtk.TreeView(self.modelo2)\r\n\r\n\t    descripcion2 = Gtk.CellRendererText()\r\n\t    columna_descripcion2 = Gtk.TreeViewColumn(\r\n\t    \t'Descripcion', \r\n\t    \tdescripcion2, \r\n\t    \ttext=0\r\n\t    )\r\n\r\n\t    monto2 = Gtk.CellRendererText()\r\n\t    columna_monto2 = Gtk.TreeViewColumn('Monto', monto2, text=1)\r\n\r\n\t    self.lista_arvhivos2.append_column(columna_descripcion2)\r\n\t    self.lista_arvhivos2.append_column(columna_monto2)\r\n\r\n\t    self.contenedor.attach_next_to(\r\n\t    \tself.lista_arvhivos2,\r\n\t    \tself.lista_arvhivos,\r\n\t    \tGtk.PositionType.RIGHT,\r\n\t    \t2,\r\n\t    \t2\r\n\t    )\r\n\t    #self.modelo.append(['valor 2',2.0])\r\n\r\n\tdef agregar_fila(self, btn):\r\n\t\ttexto = self.entrada.get_text()\r\n\t\tmonto = self.entrada_monto.get_text()\r\n\t\tself.modelo.append([texto, float(monto)])\r\n\r\n\tdef agregar_fila2(self, btn):\r\n\t\ttexto2 = self.entrada2.get_text()\r\n\t\tmonto2 = self.entrada_monto2.get_text()\r\n\t\tself.modelo2.append([texto2, float(monto2)])\r\n\t\n\n\nif __name__ == '__main__':\r\n\tventana = Mi_Ventana()\r\n\tventana.show_all()\r\n\tGtk.main()","sub_path":"python-GTK/balance general.py","file_name":"balance general.py","file_ext":"py","file_size_in_byte":3396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"107953173","text":"from queue import Queue\nfrom concurrent.futures import ThreadPoolExecutor as Pool\n\n\n\"\"\"Collection of scrapper classes which glue all the other framework components\ntogether.\n\"\"\"\n\n\nclass BaseScrapper:\n    \"\"\"Base class for other scrappers.\n\n    Class variables:\n    reader - reader object, implementing generator method read(), which\n    provides input data for scrapping\n    writer - writer object, implementing thread safe method write(), which\n    allows to store scrapped data\n    errors - writer object for storing errors\n    requires - list of scrapper classes which are required to run before\n    \"\"\"\n\n    reader = None\n    writer = None\n    errors = None\n    requires = []\n\n    def scrape_all(self):\n        \"\"\"Takes all the data from the reader, processes it and passes to\n        writer.\"\"\"\n        with self.writer as writer:\n            for url in self.reader.read():\n                self.write(self.scrape(url))\n\n    def scrape(self, url):\n        \"\"\"Abstract method retrieving the data from url and processing it.\n\n        Arguments:\n        url - url to be scrapped\n        \"\"\"\n        pass\n\n    def write(self, result):\n        \"\"\"Writes the result to writer or error depending on success of\n        scraping.\n\n        Arguments:\n        result - tuple containing boolean scraping result as first element and\n        processed data or error message as second\n        \"\"\"\n        status, data = result\n        if status is True:\n            self.writer.write(data)\n        else:\n            self.errors.write(data)\n\n\nclass ConcurrentScrapper(BaseScrapper):\n    \"\"\"Multithreaded scrapper.\"\"\"\n    from settings import THREAD_NO\n\n    def __init__(self):\n        self.results = Queue(maxsize=self.THREAD_NO)\n\n    def scrape_all(self):\n        \"\"\"Takes all the data from the reader, processes it and passes to\n        writer. 
Scraping takes place concurrently in a number of threads based\n        on the THREAD_NO value in settings.py.\"\"\"\n        with Pool(max_workers=self.THREAD_NO) as pool, self.writer as writer:\n            for result in pool.map(self.scrape, self.reader.read()):\n                self.write(result)","sub_path":"menel/scrappers.py","file_name":"scrappers.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"610040057","text":"import logging\nimport pygame\nimport time\nimport random \nimport os\nimport math\n\nos.chdir('D:/SkySpiriT/NTU/PBC/Project/') # directory holding the game's asset files\n\nlogging.basicConfig(level=logging.DEBUG)\npygame.init()\n\n\ndisplay_width = 1280\ndisplay_height = 960\ngameDisplay = pygame.display.set_mode((display_width, display_height)) #Displays(width,height)\nriverDisplay = pygame.Surface((1280,960)) # create a separate river layer (surface)\n\n\npygame.display.set_caption('拯救台大校長大作戰')\nclock = pygame.time.Clock()\nblack = (0, 0, 0)\nwhite = (255, 255,255)\nred = (255,0,0)\n\nblue0=(149,220,255)\nblue1=(138,208,255)\nblue2=(128,197,253)\n\nColorlist=[blue0,blue1,blue2]\n\n\nclass mainCharacter():\n\tdef __init__(self,x,y,width,height):\n\t\tself.x=x\n\t\tself.y=y\n\t\tself.width=width\n\t\tself.height=height\n\n\t\t\nclass obstacle():\n\tdef __init__(self,x,y,width,height,movespeed):\n\t\tself.x=x\n\t\tself.y=y\n\t\tself.width=width\n\t\tself.height=height\n\t\tself.movespeed=movespeed\n\nclass figure():\n\tdef __init__(self,x,y,width,height):\n\t\tself.x=x\n\t\tself.y=y\n\t\tself.width=width\n\t\tself.height=height\n\n\n## this is to display numbers in text, in this case GPA. \nclass text():\n\tdef __init__(self,content,size,color,x,y):\n\n\t\tself.content=content\n\t\tself.size=size\n\t\tself.color=color\n\t\tself.x=x\n\t\tself.y=y\n\t\t\n\n\n####### text\n## dodged: later, randomly show an on-screen message congratulating the player on how many they dodged\n\n'''\ndef things_dodged(count):\n\tfont = pygame.font.SysFont(None, 25)\n\ttext = font.render(\"dodged\" + str(count), True, black)\n\tgameDisplay.blit(text,(0,0))\n'''\n\ndef set_MainCharacter(character,img):\n\tgameDisplay.blit(img,(character.x,character.y))\n\t\ndef set_Obstacle(obstacle,img):\n\tgameDisplay.blit(img,(obstacle.x,obstacle.y))\n\t\ndef set_Figure(figure,img):\n\tgameDisplay.blit(img,(figure.x,figure.y))\n\n## setting text on screen\ndef set_Text(text):\n\tnow_text=pygame.font.Font(\"freesansbold.ttf\",text.size)\n\tnow_text=now_text.render(text.content,True,text.color)\n\tnow_text_rect=now_text.get_rect()\n\tnow_text_rect.center=(text.x,text.y)\n\tgameDisplay.blit(now_text,now_text_rect.center)\n\tpygame.display.update()\t\n\t\ndef set_ColorBlock(layer,color,y,x,width,height):\n\tpygame.draw.rect(layer,color, pygame.Rect(y, x, width, height))\n\t\ndef set_River(layer,block_width,block_height,colorlist):\n\t\tfor i in range(0,1281,block_width):\n\t\t\tfor j in range(0,641,block_height):\n\t\t\t\tset_ColorBlock(layer,colorlist[random.randrange(0,len(colorlist))],i,j,block_width,block_height)\n\n\n\ndef game_loop():\n\n\tboatImg0 = pygame.image.load(\"boat.png\") ##uploading image\n\tboatImg1 = pygame.image.load(\"boat1.png\")\n\tboatImg2 = pygame.image.load(\"boat2.png\")\n\tboatList=[boatImg0,boatImg1,boatImg2]\n\t\n\tbikeImg = pygame.image.load(\"bike.png\")\n\tpeopleImg = pygame.image.load(\"people.png\")\t\n\tgpaImg = pygame.image.load(\"GPA.png\")\n\thpbarImg = pygame.image.load(\"HPbar.png\")\n\n\tboat = mainCharacter(display_width*0.1 , display_height*0.75 , 96 , 96 ) #set status of object\r\n\t\r\n\tbike = 
obstacle(1280,random.randrange(display_height*320/960, display_height*1-96),96,96,-10)\r\n\tpeople = obstacle(1280,random.randrange(display_height*320/960, display_height*1-96),96,96,-5)\r\n\tobstaclelist=[bike,people]## establish an obstacle list\r\n\t\r\n\tgpa_icon=figure(30,50,96,96)\r\n\r\n\tGPA=4.3\r\n\t# def __init__(self,text,size,color,x,y):\r\n\tnow_gpa=text(str(GPA),60,black,gpa_icon.width+96,gpa_icon.height) \r\n\t\r\n\t## create ending object\r\n\tending_message=text(\"You have failed your semester!\",100,red,display_width*(1/2),display_height*(2/3))\r\n\r\n\r\n\tx_change = 0 #set constants\r\n\ty_change = 0\t\r\n\t# dodged=0 ## dodged ; congratulating on dodge??\r\n\r\n\t\r\n\tboatImgNum=0\t#controls the frame rate of the boat gif\r\n\tframe=0 \r\n\t\r\n\tgameExit = False\r\n\t\r\n\twhile not gameExit:\r\n\t###########event handling loop###########\r\n\t\t\r\n\t\tfor event in pygame.event.get(): #it gets any event that happens...movement of mouse or clicking etc\r\n\t\t\tif event.type == pygame.QUIT: # when we will click X it will quit the window\r\n\t\t\t\tlogging.info(\"X is pressed\")\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit()\r\n\r\n\t\t\t################This event will handle situation when ever any key will be pressed ##################################\r\n\t\t\tif event.type == pygame.KEYDOWN:\r\n\t\t\t\tif event.key == pygame.K_LEFT:#pressing left arrow will decrease x-axis coordinate\r\n\t\t\t\t\tx_change = -7\r\n\t\t\t\t\t\r\n\t\t\t\tif event.key == pygame.K_RIGHT:#pressing right arrow will increase x-axis coordinate\r\n\t\t\t\t\tx_change = 7\r\n\t\t\t\t\t\r\n\t\t\t\tif event.key == pygame.K_UP:#pressing UP arrow will decrease Y-axis coordinate\r\n\t\t\t\t\ty_change = -7\r\n\r\n\t\t\t\tif event.key == pygame.K_DOWN:#pressing Down arrow will increase Y-axis coordinate\r\n\t\t\t\t\ty_change = 7\r\n\t\t\t\t\t\r\n\t\t\t################This event will handle situation when ever any key will be released ##################################\r\n\t\t\tif event.type == pygame.KEYUP:\r\n\t\t\t\tif event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n\t\t\t\t\tx_change = 0\r\n\t\t\t\tif event.key == pygame.K_UP or event.key == pygame.K_DOWN:\r\n\t\t\t\t\ty_change = 0\r\n\r\n\t\t\r\n\t\tboat.x += x_change\r\n\t\tboat.y += y_change\r\n\r\n\t\tgameDisplay.fill(white)\r\n\t\tif frame % 20 ==0:\t\t\t\t#draw the river onto the riverDisplay layer (its look changes every 20 frames)\r\n\t\t\tset_River(riverDisplay,20,20,Colorlist)\t\r\n\t\tgameDisplay.blit(riverDisplay, (0,320)) #then blit the riverDisplay layer onto gameDisplay\r\n\t\t\r\n\t\t\r\n\t\t#####################animate the boat images as a gif, advancing to the next image every few frame updates###################\r\n\t\tif boatImgNum!=2: \r\n\t\t\tset_MainCharacter(boat,boatList[boatImgNum])\r\n\t\t\tframe+=1\r\n\t\t\tif frame % 10 == 0:\r\n\t\t\t\tboatImgNum += 1\r\n\r\n\t\telif boatImgNum==2:\r\n\t\t\tset_MainCharacter(boat,boatList[boatImgNum])\r\n\t\t\tframe+=1\r\n\t\t\tif frame % 10 == 0:\r\n\t\t\t\tboatImgNum = 0\r\n\r\n\t\t\r\n\t\tset_Obstacle(bike,bikeImg)\r\n\t\tbike.x += bike.movespeed\r\n\t\tset_Obstacle(people,peopleImg)\r\n\t\tpeople.x += people.movespeed\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t#things_dodged(dodged)\r\n\t\t\r\n\t\t\r\n\t\tif boat.y < display_height*320/960:\r\n\t\t\tboat.y = display_height*320/960 \r\n\t\t\t\r\n\t\tif boat.y + boat.height> display_height:\r\n\t\t\tboat.y = display_height - boat.height\r\n\t\t\t\r\n\t\tif boat.x < 0:\r\n\t\t\tboat.x = 0\r\n\t\t\r\n\t\tif boat.x +boat.width > display_width :\r\n\t\t\tboat.x = display_width - boat.width\r\n\t\t\r\n\t\t\r\n\t\tif bike.x < 0:\r\n\t\t\tbike.y = random.randrange(display_height*320/960, display_height-bike.height)\r\n\t\t\tbike.x = 1280\r\n\t\t\tbike.movespeed -= 0.5\t\r\n\t\t\t\r\n\t\tif people.x < 0:\r\n\t\t\tpeople.y = random.randrange(display_height*320/960, 
display_height-people.height)\r\n\t\t\tpeople.x = 1280\r\n\t\t\tpeople.movespeed -= 0.5\t\t\t\t\r\n\t\t\r\n\r\n\t\t#display one HP bar segment for every 0.5 of GPA\r\n\t\tfor i in range(0,int(math.floor(float(now_gpa.content)/0.5))):\r\n\t\t\thpbar = figure( gpa_icon.x+50*(i+1) , gpa_icon.y , 96 , 96)\r\n\t\t\tset_Figure(hpbar,hpbarImg)\r\n\t\t\t\r\n\t\tset_Figure(gpa_icon,gpaImg)\t\t\t\r\n\t\tset_Text(now_gpa)\r\n\t\t\r\n\t\tif float(now_gpa.content)<=0:\r\n\t\t\t#display message\r\n\t\t\t#gameDisplay.fill(white)\r\n\t\t\tset_Text(ending_message)\r\n\t\t\ttime.sleep(2)\r\n\t\t\tgame_loop()\r\n\t\t\t#gameExit()\r\n\r\n\t\tfor obstacles in obstaclelist:\r\n\t\t\tcrossed_times=0\r\n\r\n\t\t\tif boat.x+boat.width > obstacles.x and boat.x+boat.width < obstacles.x+obstacles.width:\r\n\r\n\t\t\t\tif (boat.y > obstacles.y and boat.y< obstacles.y+obstacles.height) or (boat.y+boat.height > obstacles.y and boat.y+boat.height < obstacles.y+obstacles.height):\r\n\t\t\t\t\tif crossed_times==1:\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tchanged_gpa=\"%-10.2f\"%(float(now_gpa.content)-0.1) ## testing showed 0.02 is the best decrement\r\n\t\t\t\t\t\tnow_gpa.content=str(changed_gpa)\r\n\t\t\t\t\t\tcrossed_times+=1\r\n\r\n\t\t\t\t\tprint(\"crossed object\")\r\n\t\t\t\t\tprint(\"-0.1\",\" 1 time\")\r\n\t\t\t\r\n\t\t\r\n\t\tif float(now_gpa.content)<2.5:\r\n\t\t\tnow_gpa.color=red\r\n\t\telse:\r\n\t\t\tnow_gpa.color=black\r\n\t\t\t\r\n\r\n\t\tpygame.display.update()\r\n\t\tclock.tick(60) ## cap the main loop at 60 frames per second\r\n\r\nlogging.info(\"calling the game loop\")\r\ngame_loop()\r\nlogging.info(\"calling the quit function\")\r\npygame.quit()\r\nlogging.info(\"I am the last line of the code\")\r\nquit()","sub_path":"拯救台大校長大作戰/Resource/Stage2-river/OldVision/ver4.0.py","file_name":"ver4.0.py","file_ext":"py","file_size_in_byte":7620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"503072469","text":"from utils import compare_types, conforms, is_subset, join, join_list, set_common_ancestor, smart_add\nimport cmp.visitor as visitor\nfrom cmp.semantic import AutoType, Context, ErrorType, Scope, SelfType, SemanticError, Type\nfrom AST import AssignNode, AttrDeclarationNode, BlockNode, CallNode, CaseDeclarationNode, CaseVarNode, ClassDeclarationNode, ComparisonNode, ConstantBoolNode, ConstantNumNode, ConstantStringNode, FuncDeclarationNode, HyphenNode, IfDeclarationNode, InstantiateNode, IsVoidDeclarationNode, LetDeclarationNode, NotNode, OperationNode, ProgramNode, VarDeclarationNode, VariableNode, WhileDeclarationNode\n\nWRONG_SIGNATURE = 'Method \"%s\" already defined in \"%s\" with a different signature.'\nSELF_IS_READONLY = 'Variable \"self\" is read-only.'\nLOCAL_ALREADY_DEFINED = 'Variable \"%s\" is already defined.'\nINCOMPATIBLE_TYPES = 'Cannot convert \"%s\" into \"%s\".'\nVARIABLE_NOT_DEFINED = 'Variable \"%s\" is not defined.'\nINVALID_OPERATION = 'Operation is not defined between \"%s\" and \"%s\".'\n\nclass InferenceGatherer:\n    def __init__(self, context:Context):\n        self.context = context\n        self.current_type = None\n        self.current_method = None\n        self.current_attrb = None\n        self.inference_graph = dict()\n        self.errors = []\n\n    @visitor.on('node')\n    def visit(self, node, scope):\n        pass\n\n    @visitor.when(ProgramNode)\n    def visit(self, node:ProgramNode) -> Scope:\n        scope = Scope()\n        for declaration in node.declarations:\n            self.visit(declaration, scope.create_child())\n        \n        return scope\n    \n    @visitor.when(ClassDeclarationNode)\n    def visit(self, node:ClassDeclarationNode, scope):\n        self.current_type = self.context.get_type(node.id)\n        scope.define_variable(\"self\", 
self.current_type)\n for attr in self.current_type.attributes:\n scope.define_variable(attr.name, attr.type)\n \n for feature in node.features:\n self.visit(feature, scope)\n \n @visitor.when(AttrDeclarationNode)\n def visit(self, node, scope):\n self.current_attrb = self.current_type.get_attribute(node.id)\n node_type = self.update_type(self.current_attrb.type)\n\n if not node.expr:\n self.current_attrb = None\n node.inferenced_type = node_type\n return\n \n self.visit(node.expr, scope)\n node_expr = self.update_type(node.expr.inferenced_type)\n node_expr = conforms(node_expr, node_type)\n node.expr.inferenced_type = node_expr\n\n node.inferenced_type = node_type# if len(node_expr.type_set) else ErrorType()\n \n var = scope.find_variable(node.id)\n var.type = node.inferenced_type\n self.current_attrb = None\n \n @visitor.when(FuncDeclarationNode)\n def visit(self, node, scopex):\n scope = scopex.create_child()\n self.current_method = self.current_type.get_method(node.id)\n for idx, typex in zip(self.current_method.param_names, self.current_method.param_types):\n scope.define_variable(idx, typex)\n \n self.visit(node.body, scope)\n ret_type_decl = self.update_type(self.current_method.return_type)\n ret_type_expr = self.update_type(node.body.inferenced_type)\n ret_type_expr = conforms(ret_type_expr, ret_type_decl)\n node.body.inferenced_type = ret_type_expr\n\n if isinstance(self.current_method.return_type, AutoType):\n auto_return = self.current_method.return_type\n ret_type_decl = conforms(ret_type_decl, ret_type_expr)\n if is_subset(ret_type_decl, auto_return):\n self.update_graph(ret_type_decl, ret_type_expr)\n self.current_method.return_type = ret_type_decl\n\n node.inferenced_type = ret_type_decl\n self.current_method = None\n \n @visitor.when(BlockNode)\n def visit(self, node, scope):\n for expr in node.body:\n self.visit(expr, scope)\n node.inferenced_type = node.body[-1].inferenced_type\n\n @visitor.when(IfDeclarationNode)\n def visit(self, node, scope):\n self.visit(node.ifexpr, scope)\n ifexpr_type = node.ifexpr.inferenced_type\n bool_type = self.context.get_type(\"Bool\")\n if isinstance(ifexpr_type, AutoType):\n ifexpr_type.set_upper_limmit([bool_type])\n\n self.visit(node.thenexpr, scope)\n then_type = self.update_type(node.thenexpr.inferenced_type)\n self.visit(node.elseexpr, scope)\n else_type = self.update_type(node.elseexpr.inferenced_type)\n\n joined = join(then_type, else_type)\n if not isinstance(joined, ErrorType):\n type_sets, heads = joined\n node.inferenced_type = AutoType(\"IF\", heads, type_sets)\n else:\n node.inferenced_type = ErrorType()\n\n @visitor.when(CaseDeclarationNode)\n def visit(self, node, scope:Scope):\n self.visit(node.expr, scope)\n self.update_type(node.expr.inferenced_type)\n\n type_list = []\n for var in node.casevars:\n child = scope.create_child()\n self.visit(var, child)\n type_list.append(var.inferenced_type)\n \n node_type = join_list(type_list)\n node.inferenced_type = node_type\n \n @visitor.when(WhileDeclarationNode)\n def visit(self, node, scope):\n self.visit(node.whileexpr, scope)\n pred_type = self.update_type(node.whileexpr.inferenced_type)\n bool_type = self.context.get_type(\"Bool\")\n if isinstance(pred_type, AutoType):\n pred_type.set_upper_limmit([bool_type])\n\n self.visit(node.bodyexpr, scope)\n self.update_type(node.bodyexpr.inferenced_type)\n node.inferenced_type = self.context.get_type(\"Object\")\n \n @visitor.when(LetDeclarationNode)\n def visit(self, node, scope):\n child = scope.create_child()\n for var in 
node.letvars:\n self.visit(var, child)\n self.visit(node.expr, child)\n node.inferenced_type = self.update_type(node.expr.inferenced_type)\n\n @visitor.when(CaseVarNode)\n def visit(self, node, scope):\n try:\n node_type = self.context.get_type(node.type, selftype=False, autotype=False)# if node.type != \"SELF_TYPE\" else SelfType()\n except SemanticError as err:\n node_type = ErrorType()\n scope.define_variable(node.id, node_type)\n self.visit(node.expr, scope)\n node.inferenced_type = self.update_type(node.expr.inferenced_type)\n\n @visitor.when(VarDeclarationNode)\n def visit(self, node, scope):\n try:\n node_type = self.context.get_type(node.type)\n except SemanticError as err:\n node_type = ErrorType()\n \n if not scope.is_local(node.id):\n scope.define_variable(node.id, node_type)\n node.define = True\n else:\n node.define = False\n self.AddError(f\"Declaring Variable \\\"{node.id}\\\":\",LOCAL_ALREADY_DEFINED.replace('%s', node.id, 1))\n\n if node.expr:\n self.visit(node.expr, scope)\n expr_type = self.update_type(node.expr.inferenced_type)\n expr_type = conforms(expr_type, node_type)\n node.expr.inferenced_type = expr_type\n \n node.inferenced_type = node_type\n \n @visitor.when(AssignNode)\n def visit(self, node, scope:Scope):\n var = scope.find_variable(node.id)\n if not var:\n node.define = False\n var_type = ErrorType()\n else:\n node.define = True\n var_type = var.type\n\n self.visit(node.expr, scope)\n node_expr = self.update_type(node.expr.inferenced_type)\n\n if var and var.name != \"self\":\n node_expr = conforms(node_expr, var_type)\n node.expr.inferenced_type = node_expr\n if isinstance(var_type, AutoType):\n var_type = conforms(var_type, node_expr)\n var.type = var_type\n\n node.inferenced_type = var_type\n\n @visitor.when(CallNode)\n def visit(self, node, scope):\n if node.obj == None:\n obj_type = self.current_type\n elif isinstance(node.obj, tuple):\n self.visit(node.obj[0], scope)\n child_type = self.update_type(node.obj[0].inferenced_type)\n try:\n obj_type = self.context.get_type(node.obj[1], selftype=False, autotype=False)\n if isinstance(child_type, AutoType):\n child_type.set_upper_limmit([obj_type])\n except SemanticError:\n obj_type = ErrorType()\n else:\n self.visit(node.obj, scope)\n obj_type = self.update_type(node.obj.inferenced_type)\n \n methods = None\n try:\n methods = [(obj_type, obj_type.get_method(node.id))]\n except SemanticError as err:\n if isinstance(obj_type, AutoType):\n result = self.context.get_method_by_name(node.id, len(node.args))\n types = [typex for _, typex in result]\n obj_type.set_upper_limmit(types)\n if len(obj_type.upper_limmit):\n methods = [(t, t.get_method(node.id)) for t in obj_type.upper_limmit]\n else:\n self.AddError(err)\n \n node.inferenced_obj_type = obj_type\n if methods:\n type_set = set()\n heads = []\n for typex, method in methods:\n ret_type = method.return_type\n ret_type = typex if isinstance(ret_type, SelfType) else ret_type\n heads, type_set = smart_add(type_set, heads, ret_type)\n if len(node.args) == len(method.param_types):\n for i in range(len(node.args)):\n arg, param_type = node.args[i], method.param_types[i]\n self.visit(arg, scope)\n arg_type = self.update_type(arg.inferenced_type)\n arg_type = conforms(arg_type, param_type)\n if isinstance(param_type, AutoType):\n param_type = conforms(param_type, arg_type)\n method.param_types[i] = param_type\n self.update_graph(arg_type, param_type)\n arg.inferenced_type = arg_type\n node.inferenced_type = AutoType(node.id, heads, type_set)\n else:\n 
node.inferenced_type = ErrorType()\n\n @visitor.when(OperationNode)\n def visit(self, node, scope):\n self.visit(node.left, scope)\n left_type = self.update_type(node.left.inferenced_type)\n\n self.visit(node.right, scope)\n right_type = self.update_type(node.right.inferenced_type)\n\n int_type = self.context.get_type(\"Int\")\n if isinstance(left_type, AutoType):\n left_type.set_upper_limmit([int_type])\n \n if isinstance(right_type, AutoType):\n right_type.set_upper_limmit([int_type])\n \n node.inferenced_type = int_type\n\n @visitor.when(ComparisonNode)\n def visit(self, node, scope):\n self.visit(node.left, scope)\n left_type = self.update_type(node.left.inferenced_type)\n\n self.visit(node.right, scope)\n right_type = self.update_type(node.right.inferenced_type)\n\n left_type = conforms(left_type, right_type)\n node.left.inferenced_type = left_type\n right_type = conforms(right_type, left_type)\n node.right.inferenced_type = right_type\n node.inferenced_type = self.context.get_type(\"Bool\")\n \n @visitor.when(NotNode)\n def visit(self, node, scope):\n self.visit(node.lex, scope)\n lex_type = self.update_type(node.lex.inferenced_type)\n bool_type = self.context.get_type(\"Bool\")\n if isinstance(lex_type, AutoType):\n lex_type.set_upper_limmit([bool_type])\n\n node.inferenced_type = bool_type\n \n @visitor.when(HyphenNode)\n def visit(self, node, scope):\n self.visit(node.lex, scope)\n lex_type = self.update_type(node.lex.inferenced_type)\n int_type = self.context.get_type(\"Int\")\n if isinstance(lex_type, AutoType):\n lex_type.set_upper_limmit([int_type])\n node.inferenced_type = int_type\n \n @visitor.when(VariableNode)\n def visit(self, node, scope):\n var = scope.find_variable(node.lex)\n if var:\n node.define = True\n var_type = self.update_type(var.type) \n else:\n node.define = False\n var_type = ErrorType()\n node.inferenced_type = var_type\n\n @visitor.when(IsVoidDeclarationNode)\n def visit(self, node, scope):\n self.visit(node.lex, scope)\n lex_type = self.update_type(node.lex.inferenced_type)\n node.inferenced_type = self.context.get_type(\"Bool\")\n\n @visitor.when(InstantiateNode)\n def visit(self, node, scope):\n try:\n node_type = self.context.get_type(node.lex, selftype=False, autotype=False)\n except SemanticError as err:\n node_type = ErrorType()\n node.inferenced_type = node_type\n \n @visitor.when(ConstantNumNode)\n def visit(self, node, scope):\n node.inferenced_type = self.context.get_type(\"Int\")\n \n @visitor.when(ConstantStringNode)\n def visit(self, node, scope):\n node.inferenced_type = self.context.get_type(\"String\")\n \n @visitor.when(ConstantBoolNode)\n def visit(self, node, scope):\n node.inferenced_type = self.context.get_type(\"Bool\")\n\n def update_graph(self, decl_type, expr_type) -> Type:\n if isinstance(decl_type ,AutoType) and isinstance(expr_type ,AutoType):\n self.set_dependencies(decl_type, expr_type)\n self.set_dependencies(expr_type, decl_type)\n \n def set_dependencies(self, type1:Type, type2:Type):\n try:\n self.inference_graph[type1].add(type2)\n except KeyError:\n self.inference_graph[type1] = set([type2])\n\n\n def update_type(self, typex:Type):\n if isinstance(typex, SelfType):\n typex = self.current_type\n return typex\n\n def AddError(self, extra = \"\", prefixed = \"\"):\n current_type = f\"In class \\\"{self.current_type.name}\\\", \"\n current_loc = f\"in method \\\"{self.current_method.name}\\\". \" if self.current_method else \"\" \n current_loc = f\"in attribute \\\"{self.current_attrb.name}\\\". 
\" if self.current_attrb else current_loc\n self.errors.append(current_type + current_loc + extra + \" \" + prefixed)","sub_path":"inference_gatherer.py","file_name":"inference_gatherer.py","file_ext":"py","file_size_in_byte":14539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"645710343","text":"## import statements\n\nimport requests_oauthlib\nimport webbrowser\nimport json\nimport csv\nfrom datetime import datetime\n\nimport secret_data\n\n\n# CACHING SETUP #\n# --------------------------------------------------\n# Caching constants\n# --------------------------------------------------\n\nDATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S.%f\"\nDEBUG = True\nCACHE_FNAME = \"cache_contents.json\"\nCREDS_CACHE_FILE = \"creds.json\"\n\n\n# --------------------------------------------------\n# Load cache files: data and credentials\n# --------------------------------------------------\n\n# Load data cache\ntry:\n with open(CACHE_FNAME, 'r', encoding='UTF-8') as cache_file:\n cache_json = cache_file.read()\n CACHE_DICTION = json.loads(cache_json)\nexcept FileNotFoundError:\n CACHE_DICTION = {}\n\n# Load creds cache\ntry:\n with open(CREDS_CACHE_FILE, 'r', encoding='UTF-8')\\\n as creds_file:\n cache_creds = creds_file.read()\n CREDS_DICTION = json.loads(cache_creds)\nexcept FileNotFoundError:\n CREDS_DICTION = {}\n\n\n# ---------------------------------------------\n# Cache functions\n# ---------------------------------------------\ndef has_cache_expired(timestamp_str, expire_in_days):\n \"\"\"Check if cache timestamp is over expire_in_days old\"\"\"\n # gives current datetime\n now = datetime.now()\n\n # datetime.strptime converts a formatted string into datetime object\n cache_timestamp = datetime.strptime(timestamp_str, DATETIME_FORMAT)\n\n # subtracting two datetime objects gives you a timedelta object\n delta = now - cache_timestamp\n delta_in_days = delta.days\n\n # now that we have days as integers, we can just use comparison\n # and decide if cache has expired or not\n if delta_in_days > expire_in_days:\n # It's been longer than expiry time\n return True\n else:\n return False\n\n\ndef get_from_cache(identifier, dictionary):\n \"\"\"If unique identifier exists in specified cache dictionary and has not\n expired, return the data associated with it from the request, else return\n None\n \"\"\"\n # Assuming none will differ with case sensitivity here\n identifier = identifier.upper()\n if identifier in dictionary:\n data_assoc_dict = dictionary[identifier]\n if has_cache_expired(data_assoc_dict['timestamp'], data_assoc_dict[\n \"expire_in_days\"]):\n if DEBUG:\n print(\"Cache has expired for {}\".format(identifier))\n # also remove old copy from cache\n del dictionary[identifier]\n data = None\n else:\n data = dictionary[identifier]['values']\n else:\n data = None\n return data\n\n\ndef set_in_data_cache(identifier, data, expire_in_days):\n \"\"\"Add identifier and its associated values (literal data) to the data\n cache dictionary, and save the whole dictionary to a file as json\n \"\"\"\n identifier = identifier.upper()\n CACHE_DICTION[identifier] = {\n 'values': data,\n 'timestamp': datetime.now().strftime(DATETIME_FORMAT),\n 'expire_in_days': expire_in_days\n }\n\n with open(CACHE_FNAME, 'w', encoding='UTF-8') as cached_file:\n cached_json = json.dumps(CACHE_DICTION)\n cached_file.write(cached_json)\n\n\ndef set_in_creds_cache(identifier, data, expire_in_days):\n \"\"\"Add identifier and its associated values (literal data) to the\n 
credentials cache dictionary, and save the whole dictionary to a file as\n json\n \"\"\"\n identifier = identifier.upper() # make unique\n CREDS_DICTION[identifier] = {\n 'values': data,\n 'timestamp': datetime.now().strftime(DATETIME_FORMAT),\n 'expire_in_days': expire_in_days\n }\n\n with open(CREDS_CACHE_FILE, 'w', encoding='UTF-8') as cached_file:\n cached_json = json.dumps(CREDS_DICTION)\n cached_file.write(cached_json)\n\n\n# ADDITIONAL CODE for program should go here...\n# Perhaps authentication setup, functions to get and process data,\n# a class definition... etc.\n\n# OAuth1 API Constants - vary by API\n# Private data in a hidden secret_data.py file\n\n# what Tumblr calls Consumer Key\nCONSUMER_KEY = secret_data.consumer_key\n# What Tumblr calls Consumer Secret\nCONSUMER_SECRET = secret_data.consumer_secret\n\n# Specific to API URLs, not private\nREQUEST_TOKEN_URL = \"https://www.tumblr.com/oauth/request_token\"\nBASE_AUTH_URL = \"https://www.tumblr.com/oauth/authorize\"\nACCESS_TOKEN_URL = \"https://www.tumblr.com/oauth/access_token\"\n\n\ndef get_tokens(client_key=CONSUMER_KEY, client_secret=CONSUMER_SECRET,\n request_token_url=REQUEST_TOKEN_URL,\n base_authorization_url=BASE_AUTH_URL,\n access_token_url=ACCESS_TOKEN_URL,\n verifier_auto=True):\n oauth_inst = requests_oauthlib.OAuth1Session(\n client_key, client_secret=client_secret)\n\n fetch_response = oauth_inst.fetch_request_token(request_token_url)\n\n # Using the dictionary .get method in these lines\n resource_owner_key = fetch_response.get('oauth_token')\n resource_owner_secret = fetch_response.get('oauth_token_secret')\n\n auth_url = oauth_inst.authorization_url(base_authorization_url)\n # Open the auth url in browser:\n\n # For user to interact with & approve access of this app -- this script\n webbrowser.open(auth_url)\n\n # Deal with required input, which will vary by API\n if verifier_auto:\n # if the input is default (True), like Twitter\n verifier = input(\"Please input the verifier: \").strip()\n else:\n redirect_result = input(\"Paste the full redirect URL here: \").strip()\n # returns a dictionary\n # -- you may want to inspect that this works and edit accordingly\n oauth_resp = oauth_inst.parse_authorization_response(redirect_result)\n verifier = oauth_resp.get('oauth_verifier')\n\n # Regenerate instance of oauth1session class with more data\n oauth_inst = requests_oauthlib.OAuth1Session(\n client_key, client_secret=client_secret,\n resource_owner_key=resource_owner_key,\n resource_owner_secret=resource_owner_secret,\n verifier=verifier)\n\n # returns a dictionary\n oauth_tokens = oauth_inst.fetch_access_token(access_token_url)\n\n # Use that dictionary to get these things\n # Tuple assignment syntax\n resource_owner_key, resource_owner_secret = oauth_tokens.get(\n 'oauth_token'), oauth_tokens.get('oauth_token_secret')\n\n return client_key, client_secret, resource_owner_key, \\\n resource_owner_secret, verifier\n\n\n# Default: 1 days for creds expiration\ndef get_tokens_from_service(service_name_ident, expire_in_days=1):\n creds_data = get_from_cache(service_name_ident, CREDS_DICTION)\n if creds_data:\n if DEBUG:\n print(\"Loading creds from cache...\")\n print()\n else:\n if DEBUG:\n print(\"Fetching fresh credentials...\")\n print(\"Prepare to log in via browser.\")\n print()\n creds_data = get_tokens(verifier_auto=False)\n set_in_creds_cache(service_name_ident, creds_data,\n expire_in_days=expire_in_days)\n return creds_data\n\n\ndef create_request_identifier(url, params_diction):\n sorted_params = 
sorted(params_diction.items(), key=lambda x: x[0])\n # Make the list of tuples into a flat list using a complex list\n # comprehension\n params_str = \"_\".join([str(e) for l in sorted_params for e in l])\n total_ident = url + \"?\" + params_str\n return total_ident.upper() # Creating the identifier\n\n\ndef get_data_from_api(request_url, service_ident, params_diction,\n expire_in_days=1):\n \"\"\"Check in cache, if not found, load data, save in cache and then return\n that data\n \"\"\"\n ident = create_request_identifier(request_url, params_diction)\n data = get_from_cache(ident, CACHE_DICTION)\n if data:\n if DEBUG:\n print(\"Loading from data cache: {}... data\".format(ident))\n else:\n if DEBUG:\n print(\"Fetching new data from {}\".format(request_url))\n\n # Get credentials\n client_key, client_secret, resource_owner_key, resource_owner_secret,\\\n verifier = get_tokens_from_service(service_ident)\n\n # Create a new instance of oauth to make a request with\n oauth_inst = requests_oauthlib.OAuth1Session(\n client_key, client_secret=client_secret,\n resource_owner_key=resource_owner_key,\n resource_owner_secret=resource_owner_secret)\n # Call the get method on oauth instance\n # Work of encoding and \"signing\" the request happens behind the scenes,\n # thanks to the OAuth1Session instance in oauth_inst\n resp = oauth_inst.get(request_url, params=params_diction)\n # Get the string data and set it in the cache for next time\n data_str = resp.text\n data = json.loads(data_str)\n set_in_data_cache(ident, data, expire_in_days)\n return data\n\n\n# Define classes for post data\nclass Post(object):\n def __init__(self, dict_post):\n self.blog_name = dict_post['blog_name']\n self.post_id = dict_post['id']\n self.post_url = dict_post['post_url']\n self.post_type = dict_post['type']\n self.post_date = dict_post['date']\n\n def __str__(self):\n return \"Post #{0} ({1}) by {2} at {3}\\nRetrieved from {4}\".format(\n self.post_id, self.post_type, self.blog_name, self.post_date,\n self.post_url)\n\n\nclass PhotoPost(Post):\n def __init__(self, dict_photo_post):\n super().__init__(dict_photo_post)\n\n self.caption = dict_photo_post['caption']\n list_photo = dict_photo_post['photos']\n self.photos = [photo['original_size']['url'] for photo in list_photo]\n\n def __str__(self):\n return (\"Post #{0} ({1}) by {2} at {3}\\nRetrieved from {4}\".format(\n self.post_id, self.post_type, self.blog_name, self.post_date,\n self.post_url))\\\n + (\"\\nCaption: {0}\\n\".format(self.caption.replace('\\n', ' ')))\\\n + \"Photos:\\n\" + \"\\n\".join(self.photos)\n\n\nclass TextPost(Post):\n def __init__(self, dict_text_post):\n super().__init__(dict_text_post)\n\n self.title = dict_text_post['title']\n self.body = dict_text_post['body']\n\n def __str__(self):\n return (\"Post #{0} ({1}) by {2} at {3}\\nRetrieved from {4}\".format(\n self.post_id, self.post_type, self.blog_name, self.post_date,\n self.post_url))\\\n + \"\\nTitle: {0}\\nBody: {1}\".format(\n self.title, self.body.replace('\\n', ' '))\n\n\n# Make sure to run your code and write CSV files by the end of the program.\n# Method for printing CSV files.\ndef print_posts_csv(list_post, file_name='posts.csv'):\n with open(file_name, \"w\", encoding='utf-8', newline='')\\\n as csv_file:\n # Write column names\n writer = csv.writer(csv_file)\n writer.writerow(\n [\"Blog Name\", \"Post ID\", \"Post Type\", \"Post Date\", \"Post URL\"])\n for post in list_post:\n # Write national site entry lines\n writer.writerow(\n [post.blog_name,\n post.post_id,\n 
post.post_type,\n                 post.post_date,\n                 post.post_url.strip()])\n\n\ndef print_photo_posts_csv(list_post, file_name='photo_posts.csv'):\n    with open(file_name, \"w\", encoding='utf-8', newline='')\\\n            as csv_file:\n        # Write column names\n        writer = csv.writer(csv_file)\n        writer.writerow(\n            [\"Blog Name\", \"Post ID\", \"Post Type\", \"Caption\", \"Photo URL\",\n             \"Post Date\", \"Post URL\"])\n        for post in list_post:\n            # Write national site entry lines\n            for photo in post.photos:\n                writer.writerow(\n                    [post.blog_name,\n                     post.post_id,\n                     post.post_type,\n                     post.caption.replace('\\n', ' '),\n                     photo,\n                     post.post_date,\n                     post.post_url.strip()])\n\n\ndef print_text_posts_csv(list_post, file_name='text_posts.csv'):\n    with open(file_name, \"w\", encoding='utf-8', newline='')\\\n            as csv_file:\n        # Write column names\n        writer = csv.writer(csv_file)\n        writer.writerow(\n            [\"Blog Name\", \"Post ID\", \"Post Type\", \"Title\", \"Body\",\n             \"Post Date\", \"Post URL\"])\n        for post in list_post:\n            # Write national site entry lines\n            writer.writerow(\n                [post.blog_name,\n                 post.post_id,\n                 post.post_type,\n                 post.title,\n                 post.body.replace('\\n', ' '),\n                 post.post_date,\n                 post.post_url.strip()])\n\n\nif not CONSUMER_KEY or not CONSUMER_SECRET:\n    print(\"You need to fill in consumer_key and consumer_secret in the \"\n          \"secret_data.py file.\")\n    exit()\nif not REQUEST_TOKEN_URL or not BASE_AUTH_URL:\n    print(\"You need to fill in this API's specific OAuth1 URLs in this \"\n          \"file.\")\n    exit()\n\n# Invoke functions\nTUMBLR_SEARCH_BASEURL = \"https://api.tumblr.com/v2/blog/\"\ntumblr_search_nbc_posts_baseurl = \\\n    TUMBLR_SEARCH_BASEURL + \"nbcnews.tumblr.com/posts/\"\n\n# Search for photos\ntumblr_search_params = {'type': \"photo\",\n                        'limit': 20,\n                        'filter': \"text\"}\ntumblr_result = get_data_from_api(\n    tumblr_search_nbc_posts_baseurl, \"Tumblr\",\n    tumblr_search_params)\n# print(type(tumblr_result))\n# pprint(tumblr_result)\n\nresult_posts = tumblr_result['response']['posts']\nposts = [Post(post) for post in result_posts]\nphoto_posts = [PhotoPost(post) for post in result_posts]\n# i = 0\n# for post in posts:\n#     i += 1\n#     print(i)\n#     print(post)\n#     print()\n# for post in photo_posts:\n#     print(post)\n#     print()\n\n# Search for text\ntumblr_search_params = {'type': \"text\",\n                        'limit': 20,\n                        'filter': \"text\"}\n\ntumblr_result = get_data_from_api(\n    tumblr_search_nbc_posts_baseurl, \"Tumblr\",\n    tumblr_search_params)\n# print(type(tumblr_result))\n# pprint(tumblr_result)\n\nresult_posts = tumblr_result['response']['posts']\ntext_posts = [TextPost(post) for post in result_posts]\n# for post in text_posts:\n#     print(post)\n#     print()\n\nprint_posts_csv(posts)\nprint_photo_posts_csv(photo_posts)\nprint_text_posts_csv(text_posts)","sub_path":"SI507project5_code.py","file_name":"SI507project5_code.py","file_ext":"py","file_size_in_byte":14380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"182488813","text":"import cmd\nimport sys\nimport os\n# import pyfem1d.tkgui\n\nclass Pyfem1dShell(cmd.Cmd):\n    intro = 'Type help or ? 
to list commands.\n'\n    prompt = 'pyfem1d>> '\n    file = None\n\n    def __init__(self,parent):\n        cmd.Cmd.__init__(self)\n        #super(Pyfem1dShell, self).__init__()\n        self.analysis = parent\n\n    def do_verbose(self, arg):\n        '''Enable verbose output'''\n        self.analysis.verbose = True\n        #print(arg)\n\n    # def do_gui(self, arg):\n    #     '''Start graphical user interface'''\n    #     pyfem1d.tkgui.startGui(self.analysis)\n\n    def do_dt(self, arg):\n        '''Set timestep size: dt value'''\n        val, errormsg = parse_line(arg, type = float, n_args = 1)\n        if not errormsg:\n            self.analysis.timestep = val[0]\n        else:\n            print(errormsg)\n\n    def do_nelem(self, arg):\n        '''Set total number of elements with nelem(value)'''\n        val, errormsg = parse_line(arg, type = int, n_args = 1)\n        if not errormsg:\n            self.analysis.number_of_elements = val[0]\n        else:\n            print(errormsg)\n\n    def do_tmax(self, arg):\n        '''Set maximum time tmax(value)'''\n        val, errormsg = parse_line(arg, type = float, n_args = 1)\n        if not errormsg:\n            self.analysis.maximum_time = val[0]\n        else:\n            print(errormsg)\n\n    def do_bctype(self, arg):\n        '''Set bc type with bctype(val), can be 0,1,2'''\n        val, errormsg = parse_line(arg, type = int, n_args = 1)\n        if not errormsg:\n            if val[0] == 0 or val[0] == 1 or val[0] == 2:\n                self.analysis.bctype = val[0]\n            else:\n                print(\"Error: bctype can only be one of 0, 1 and 2\")\n        else:\n            print(errormsg)\n\n\n    def do_solve(self, arg):\n        '''Start solution with solve()'''\n        try:\n            self.analysis.solve()\n        except Exception as e:\n            print(e)\n\n    def do_plot(self, arg):\n        '''Plot the output stress file using gnuplot. optional argument: stress_file'''\n        val, errormsg = parse_line(arg, type = str)\n        if len(val) == 0:\n            stress_file = self.analysis.stress_file\n        elif len(val) == 1:\n            stress_file = val[0]\n        else:\n            print(\"Error: plot cannot receive more than 1 argument\")\n            return\n\n        self.analysis.plotToWindow(stress_file=stress_file)\n\n    def do_plotpdf(self, arg):\n        '''Plot the output stress file to a pdf file using gnuplot. optional arguments: plot_file stress_file'''\n        val, errormsg = parse_line(arg, type = str)\n        if len(val) == 0:\n            plot_file = self.analysis.plot_file\n            stress_file = self.analysis.stress_file\n        elif len(val) == 1:\n            plot_file = val[0]\n            stress_file = self.analysis.stress_file\n        elif len(val) == 2:\n            plot_file = val[0]\n            stress_file = val[1]\n        else:\n            print(\"Error: plot cannot receive more than 2 arguments\")\n            return\n\n        self.analysis.plotPdf(stress_file=stress_file, plot_file=plot_file)\n\n    def do_pwd(self, arg):\n        'Prints current working directory'\n        print(self.analysis.workingDirectory)\n\n    #def do_cd(self, arg):\n        #'Changes current working directory'\n\n    def do_addumats(self, arg):\n        '''Add umat material: addumat exampleMaterial.py'''\n        val, errormsg = parse_line(arg, type = str, n_args = 1)\n        if not errormsg:\n            try:\n                path = os.path.join(self.analysis.workingDirectory, val[0])\n                self.analysis.add_umats(path)\n            except Exception as e:\n                print(e)\n        else:\n            print(errormsg)\n\n    def do_addloads(self, arg):\n        '''Add loading function: addloading exampleLoading.py'''\n        val, errormsg = parse_line(arg, type = str, n_args = 1)\n        if not errormsg:\n            try:\n                path = os.path.join(self.analysis.workingDirectory, val[0])\n                self.analysis.add_loads(path)\n            except Exception as e:\n                print(e)\n        else:\n            print(errormsg)\n\n\n    def do_load(self, arg):\n        '''Set load function: load triangle '''\n        val, errormsg = parse_line(arg, type = str, n_args = \"1+\")\n        if not errormsg:\n            self.analysis.set_load(val[0])\n            if len(val) > 1:\n                parameters = tuple(map(float, val[1:]))\n                self.analysis.set_load_parameters(parameters)\n        else:\n            print(errormsg)\n\n    def do_listumats(self, arg):\n        '''List all umats with the parameters and their values'''\n        for key, umat in self.analysis.umat_dict.items():\n            print(umat)\n\n    def do_listloads(self, arg):\n        '''List all loading functions with the parameters and their values'''\n        for key, load in self.analysis.load_dict.items():\n            print(load)\n\n    def do_vars(self, arg):\n        '''List all variable values and current functions'''\n        print(self.analysis.header())\n\n\n    def do_umat(self, arg):\n        '''Set umat function: umat maxwell '''\n        val, errormsg = parse_line(arg, type = str, n_args = \"1+\")\n\n        if not errormsg:\n            try:\n                self.analysis.set_umat(val[0])\n                if len(val) > 1:\n                    parameters = tuple(map(float, val[1:]))\n                    self.analysis.set_umat_parameters(parameters)\n            except Exception as e:\n                print(e)\n        else:\n            print(errormsg)\n\n    def do_run(self, arg):\n        val, errormsg = parse_line(arg, type = str, n_args = 1)\n        if not errormsg:\n            try:\n                self.analysis.cmdShell.execFile(val[0])\n            except Exception as e:\n                print(e)\n        else:\n            print(errormsg)\n\n    def do_q(self, arg):\n        '''End session'''\n        self.close()\n        return True\n\n    def do_EOF(self,arg):\n        '''End session'''\n        self.close()\n        return True\n\n    # ----- record and playback -----\n    #def do_record(self, arg):\n        #'Save future commands to filename: RECORD rose.cmd'\n        #self.file = open(arg, 'w')\n    #def do_playback(self, arg):\n        #'Playback commands from a file: PLAYBACK rose.cmd'\n        #self.close()\n        #with open(arg) as f:\n            #self.cmdqueue.extend(f.read().splitlines())\n\n    def precmd(self, line):\n        # line = line.lower()\n        if self.file and 'playback' not in line:\n            print(line, file=self.file)\n        return line\n\n    def close(self):\n        if self.file:\n            self.file.close()\n            self.file = None\n\n    def execFile(self, filename):\n        f = open( filename, \"r\" )\n        array = []\n        for line in f:\n            array.append( line )\n        f.close()\n        for i,j in enumerate(array):\n            try:\n                line = cleanLine(j)\n                if(line):\n                    
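# run each non-empty, comment-stripped line through cmd.Cmd's single-command dispatcher\n                    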
#print(\"Executing: \"+line)\n                    self.onecmd(line)\n            except Exception as err:\n                print(err)\n\n\ndef cleanLine(line):\n    return line.split('#')[0].strip()\n\ndef parse_line(arg, n_args = None, type=int):\n    'Convert a series of zero or more numbers to an argument tuple'\n    result = tuple(map(type, arg.split()))\n    if isinstance(n_args, str):\n        if n_args[-1] == \"+\":\n            min_limit = int(n_args[:-1])\n            if len(result) < min_limit:\n                errormsg = \"expected minimum \"+str(min_limit)+\" arguments, received \"+str(len(result))\n            else:\n                errormsg = None\n    elif isinstance(n_args, int):\n        if n_args and len(result) != n_args:\n            errormsg = \"expected \"+str(n_args)+\" arguments, received \"+str(len(result))\n        else:\n            errormsg = None\n    else:\n        errormsg = None\n    return result, errormsg\n\n","sub_path":"pyfem1d/pyfem1d_cmd.py","file_name":"pyfem1d_cmd.py","file_ext":"py","file_size_in_byte":7840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"62837993","text":"# -*- coding: utf-8 -*-\n'''\nFolder structure\n| ---- 13411978121020180131174754151\n| ---- 13411964458020180131180551058\n| ---- ...other work-order folders that contain slices\n| ---- TEMP (auto-generated, temporary storage for converted images, safe to delete)\n| ---- OUTPUT (auto-generated, stores the original images after classification)\n| | ---- 0\n| | ---- 1\n| | ---- ...(ten folders in total; the ninth and tenth are unused for now)\n| ---- model (stores the model weight files)\n| | ---- model1.ckpt.data-00000-of-00001\n| | ---- model1.ckpt.index\n| | ---- model1.ckpt.meta\n| | ---- checkpoint\n| ---- code (stores the code)\n| | ---- main_class.py (main file)\n| | ---- model1.py (model architecture file)\n| | ---- ...\n'''\n\nimport tensorflow as tf\nimport glob\nimport os\nimport io\nimport numpy as np\nfrom model1_0320 import model\n#from PIL import Image\nimport shutil\nfrom wand.image import Image\nfrom wand.display import display\nfrom wand.color import Color\nfrom PIL import Image as PImage\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n\nSIZE = 200\nPATH_DATA = os.path.join(os.path.pardir,'data')\nFOLDER_TYPE = 'val'\nRECORD_NAME = os.path.join(PATH_DATA,'%s.tfrecord'%FOLDER_TYPE) # stores the converted images; can be deleted after prediction\n#path = \n#RECORD_NAME = os.path.join(path,'model1_test.tfrecords')\n\ndef getFolders():\n    \n    folders = os.listdir(os.path.join(PATH_DATA,FOLDER_TYPE)) # list all entries\n    \n    def filterFolder(f): # check whether the entry is a folder\n        if os.path.splitext(f)[1] == '' and f != 'model' and f != 'TEMP' \\\n                and f != 'BACKUP' and f != 'OUTPUT' and f != '__pycache__' \\\n                and f != 'code':\n            return True # it is a folder\n        return False\n    \n    folders = list(filter(filterFolder, folders))\n    return folders\n\n\n\ndef convertImg(folder): # trim the white border and resize\n    \n    if not os.path.exists(os.path.join(PATH_DATA,'TEMP')):\n        os.mkdir(os.path.join(PATH_DATA,'TEMP'))\n    \n    temp_names = glob.glob(os.path.join(PATH_DATA,'TEMP','*.*'))\n    list(map(lambda x: os.remove(x), temp_names)) # empty the TEMP folder\n    \n    names = glob.glob(os.path.join(PATH_DATA,FOLDER_TYPE,folder, '*.jpg'))\n    for idx,name in enumerate(names):\n        img = Image(filename=name) \n        bg = Color('white') # color of the border to trim, white here\n        img.trim(color=bg, fuzz=20) # trim the white border; fuzz=20 was found by testing\n        img.resize(SIZE, SIZE)\n#        img.background_color = bg\n#        img.format = 'jpg'\n#        img.alpha_channel = False\n        img.save(filename=os.path.join(os.path.pardir,'data','TEMP','%s.jpg' % idx))\n    return names\n\n\ndef write_tfrecord(all_names): # write the images in each folder to a tfrecord\n    \n    with tf.python_io.TFRecordWriter(RECORD_NAME) as writer:\n        for label_true, names in all_names.items():\n            for name in names:\n                byte_img = PImage.open(name)\n                byte_img = byte_img.convert('RGB') # some images are grayscale and must be converted to RGB\n                byte_img = byte_img.resize((200,200))\n                byte_img = byte_img.tobytes()\n                byte_name = bytes(name, encoding='utf-8')\n#                print('write:',label_true, byte_name, len(byte_img))\n                tf_feature = {'byte_img':tf.train.Feature(bytes_list=tf.train.BytesList(value=[byte_img])),\n                              'byte_name':tf.train.Feature(bytes_list=tf.train.BytesList(value=[byte_name])),\n                              'label_true':tf.train.Feature(int64_list=tf.train.Int64List(value=[label_true]))} \n                tf_features = tf.train.Features(feature=tf_feature)\n                example = tf.train.Example(features=tf_features)\n                writer.write(example.SerializeToString()) \n\ndef read_decode(serialized_example):\n\n    tf_features = {'byte_img':tf.FixedLenFeature([], tf.string),\n                   'byte_name':tf.FixedLenFeature([], tf.string),\n                   'label_true':tf.FixedLenFeature([], tf.int64)}\n    \n    features = tf.parse_single_example(\n        serialized_example,\n        features=tf_features)\n    \n    img = tf.decode_raw(features['byte_img'], tf.uint8)\n    img = 2*tf.cast(img, tf.float32) * (1./255) -1\n    img = tf.reshape(img, [200,200,3])\n    name = tf.cast(features['byte_name'],tf.string)\n    label_true = tf.cast(features['label_true'], tf.int64)\n    return img, name, label_true\n    \ndef read_copy():\n    \n    if not os.path.exists(os.path.join(os.path.pardir,'OUTPUT')):\n        os.mkdir(os.path.join(os.path.pardir,'OUTPUT'))\n    \n    for i in range(10): # make sure the ten output folders exist\n        if not os.path.exists(os.path.join(os.path.pardir,'OUTPUT',str(i))):\n            os.mkdir(os.path.join(os.path.pardir,'OUTPUT',str(i)))\n    dataset = tf.data.TFRecordDataset(RECORD_NAME)\n    dataset = dataset.map(read_decode)\n    dataset = dataset.repeat(1)\n    dataset = dataset.batch(5)\n    iterator = dataset.make_one_shot_iterator()\n    \n    batch_image, batch_name, batch_label = iterator.get_next()\n    score_label = model(batch_image, False)\n    pred_label = tf.argmax(score_label, 1)\n    softmax = tf.nn.softmax(score_label)\n\n    init_op = tf.group(tf.global_variables_initializer(),\n                       tf.local_variables_initializer())\n    \n    saver = tf.train.Saver()\n    y_true, y_pred, names, label_prob, label_score = [], [], [], [], []\n    with tf.Session() as sess:\n        sess.run(init_op)\n        saver.restore(sess, os.path.join(os.path.pardir,'model','model1.ckpt')) \n\n        while True:\n            try:\n                label_pred,name,label_true, pred_prob, pred_score = sess.run([pred_label, batch_name, batch_label, softmax, score_label])\n\n#                name = str(name[0],encoding='utf8')\n                print(label_pred, label_true)\n                y_true.extend(label_true)\n                y_pred.extend(label_pred)\n                names.extend(name)\n                label_prob.extend(pred_prob)\n                label_score.extend(pred_score)\n\n#                n = os.path.split(name)\n#                shutil.copyfile(name,os.path.join(os.path.pardir,'OUTPUT',str(label), '%s_%s'%(n[0][3:], n[1])))\n            except tf.errors.OutOfRangeError:\n                print('Finish')\n                break\n    return y_true, y_pred, names, label_prob,label_score\n\nfolders = getFolders()\nimg_names = {}\nfor idx,folder in enumerate(folders[:]):\n#    print(folder)\n    img_list = glob.glob(os.path.join(PATH_DATA,FOLDER_TYPE,folder, '*.jpg'))\n    np.random.shuffle(img_list)\n    if folder in ['6','7','8']:\n#        continue\n        img_names[int(folder)] = img_list[:]\n    else:\n        img_names[int(folder)] = img_list[:]\n#        pass\n\ntf.reset_default_graph() \n    \n\nwrite_tfrecord(img_names)\ny_true, y_pred, names, label_prob, label_score = read_copy()\n\ndef decode_name(name):\n    return str(name,encoding='utf8')\n\n\ndef softmax(z):\n    assert len(z.shape) == 2\n    s = np.max(z, axis=1)\n    s = s[:, np.newaxis] # necessary step to do broadcasting\n    e_x = np.exp(z - s)\n    div = np.sum(e_x, axis=1)\n    div = div[:, np.newaxis] # ditto\n    return e_x / div\n\nnames = list(map(decode_name, names))\n\n\ny_true = np.array(y_true)\ny_pred = np.array(y_pred)\nnames = np.array(names)\nlabel_prob = 
np.array(label_prob)\nlabel_score = np.array(label_score)\n\n#label_prob = softmax(label_prob)\nprint(confusion_matrix(y_true, y_pred))\nprint(accuracy_score(y_true, y_pred))\ny_pred2 = y_pred\ny_pred[label_prob.max(axis=1) < 0.90] = 8\n\nprint(confusion_matrix(y_true, y_pred))\nprint(accuracy_score(y_true, y_pred))\nfor t,p,n in zip(y_true[y_true != y_pred], y_pred[y_true != y_pred], names[y_true != y_pred]):\n print(t, p, n)","sub_path":"1_ImageClass/4_Validate/save_score.py","file_name":"save_score.py","file_ext":"py","file_size_in_byte":7682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"391686433","text":"from functions import *\r\nimport pickle\r\n\r\n# initialise the data\r\nspam = init_lists('enron/spam/')\r\nham = init_lists('enron/ham/')\r\nall_emails = [(email, 'spam') for email in spam]\r\nall_emails += [(email, 'ham') for email in ham]\r\nrandom.shuffle(all_emails)\r\nprint ('Corpus size = ' + str(len(all_emails)) + ' emails')\r\n\r\n# extract the features\r\nall_features = [(get_features(email), label) for (email, label) in all_emails]\r\nprint ('Collected ' + str(len(all_features)) + ' feature sets')\r\n\r\n# train the classifier\r\ntrain_set, test_set, classifier = train(all_features, 0.8)\r\n\r\n# evaluate its performance\r\nevaluate(train_set, test_set, classifier)\r\n \r\nf = open('my_classifier.pickle', 'wb')\r\npickle.dump(classifier, f)\r\nf.close()\r\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"181189284","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nimport argparse, sys, os.path, lib.batcher, multiprocessing, copy, psutil\nfrom lib.bb.AcrParser import ExistingFileAction\nfrom sklearn.linear_model import LinearRegression as lr\nfrom sklearn import preprocessing\nfrom multiprocessing import Process, Manager\nimport pandas as pd\nimport numpy as np\n\nTZ_DICT = {'asia':'Asia/Shanghai', 'us':'America/New_York'} # timezone options\n\ndef get_args(argv):\n\n description = \"\"\"\nDescription: Regress target(s) on feature subsets in two modes. By default, \neach regression leaves one feature (a signal type, an instrument, or their\ninteraction) out in turn. In the additive mode, it adds one instrument in turn \nto the first (baseline) instrument. \n\nData file consists of columns of numeric data. All rows must have the same \nnumber of columns. Multiple targets are allowed, with results displayed\nand written to file separately.\n \nDesc must start with an index number and a space, followed by the pattern \n\"sig_name:instr_name[+variations]\". 'Sig_' will be prefixed to sig_name if not \nalready present. 
Model name in the backward mode indicates the _absence_ of \nthat feature in the model.\n \"\"\"\n formatter = lambda prog: argparse.ArgumentDefaultsHelpFormatter(prog, max_help_position=36)\n parser = argparse.ArgumentParser(formatter_class=formatter, description=description)\n parser.add_argument('--additive', '-a', action='store_true', help='additive/forward mode with first instrument as baseline')\n parser.add_argument('--bydate', '-b', metavar='metafile', action=ExistingFileAction, help='data will be divided into k folds by date if this file is provided')\n parser.add_argument('--to-csv', '-c', metavar='result', help='write results to csv; a prefix indicating target variable will be added automatically')\n parser.add_argument('--desc', '-d', metavar='descfile', action=ExistingFileAction, help='description file')\n parser.add_argument('--header', action='store_true', help='file has a header, no need for descfile')\n parser.add_argument('--kfold', '-k', metavar='int', default=0, help='number of folds for cross validation', type=int)\n parser.add_argument('--separator', '-s', metavar='char', default=' ', help='separator of the signal/target file')\n parser.add_argument('--type', '-t', metavar='typename', default='instr', help='feature type for backward selection, options: instr, sig, or both (instr-sig interaction)', type=str)\n parser.add_argument('--timezone', '-z', metavar='timezone', default='asia', help='convert Epoch time in metafile to the market timezone for date filtering, options: asia, us, or specify one according to pytz', type=str) \n parser.add_argument('--datecol', '-D', metavar='int', default=1, help='index of date column in metafile', type=int)\n parser.add_argument('--num-decimals', '-N', metavar='int', default=6, help='number of decimals to print; numbers are truncated, not rounded', type=int)\n parser.add_argument('sigfile', action=ExistingFileAction, help='signal file, with each column as a feature')\n parser.add_argument('targfile', action=ExistingFileAction, help='target file, multiple targets/columns allowed')\n\n args = parser.parse_args()\n\n if not (args.desc or args.header):\n raise Exception(\"please provide descfile or add header to signal file\")\n\n if args.type.lower() not in ['instr', 'sig', 'both']:\n raise Exception(\"invalid feature type: {0}; please choose one from 'instr', 'sig', and 'both'\".format(args.type))\n\n return args\n\n\ndef get_dataframes(data, target, args):\n '''get dataframes from input files'''\n header = None\n if args.header:\n header = 'infer'\n df = pd.read_csv(data, sep=args.separator, header=header, skipinitialspace=True).dropna(axis=1, how='all')\n dft = pd.read_csv(target, sep=args.separator, header=header, skipinitialspace=True).dropna(axis=1, how='all')\n\n if df.empty:\n raise Exception(\"no signal data found\")\n\n if dft.empty:\n raise Exception(\"no target data found\")\n\n if args.desc:\n descs = ['-'.join(l.strip().split(' ')[1:3]) for l in open(args.desc, 'r')]\n column_mapping = dict(zip(range(len(descs)), descs))\n df.rename(columns=column_mapping, inplace=True)\n\n # check if any column contains nonnumeric values\n nonnumeric_mask = np.array([d == 'object' for d in df.dtypes])\n nonnum_df = df.columns[nonnumeric_mask]\n if len(nonnum_df) > 0:\n raise Exception(\"\\n\\nNonnumeric column:\\n{0}\".format(\"\\n\".join([str(c) for c in nonnum_df])))\n\n df_size = os.path.getsize(data) \n \n return df, dft, df_size\n\n# helper functions\ndef uniq(seq):\n '''retrieve unique items from a list while preserving the 
order'''\n seen = set()\n return [x for x in seq if not (x in seen or seen.add(x))]\n\ndef parse_desc(args):\n ''' parse desc file and retrieve sig and instr names'''\n sig = [l.strip().split(' ')[1].split(':')[0] for l in open(args.desc,'r')]\n instr = [l.strip().split(' ')[1].split(':')[1] for l in open(args.desc, 'r')]\n return sig, instr\n\ndef get_measures(X_train, Y_train, X_test=None, Y_test=None, normalize=True):\n '''compute regression statistics (in or out of sample)'''\n model = lr(normalize=normalize)\n try:\n model.fit(X_train, Y_train)\n except TypeError:\n import cPickle\n cPickle.dump([X_train, Y_train], open('regsubset.cPickle', 'w'))\n raise\n\n if X_test is None or Y_test is None:\n X, Y = X_train, Y_train\n else:\n X, Y = X_test, Y_test\n \n e = Y - model.predict(X)\n SSR = e.T.dot(e)\n n, k = X.shape[0], X.shape[1]\n dim = Y.shape[1]\n SSRs = np.diagonal(SSR)\n R2, MSE, AIC, BIC = [], [], [], []\n \n for i, ssr in enumerate(SSRs):\n s2 = ssr/(n - 1)\n mse = ssr/(n - k)\n lgL = n*np.log(1./np.sqrt(2*np.pi*s2)) - 0.5/s2*ssr\n aic = 2*k - 2*lgL\n bic = k*np.log(n) - 2*lgL\n sst = np.sum((Y[:,i] - np.mean(Y[:,i]))**2)\n r2 = 1 - ssr/sst\n R2.append(r2)\n MSE.append(mse)\n AIC.append(aic)\n BIC.append(bic)\n\n return R2, MSE, AIC, BIC, dim\n\ndef slice_df(factor_list, factor, exclude=True):\n '''exclude/include indices of a specific factor'''\n idx = np.where(factor_list == factor)[0]\n if exclude:\n idx = np.setdiff1d(np.arange(len(factor_list)), idx, assume_unique=True)\n return idx\n\ndef get_result(result, X, Y, idx, model):\n ''' compute and append the result to a list'''\n r2, mse, aic, bic, dim = get_measures(X[:, idx], Y)\n for i in range(dim):\n y = 'Y'+str(i)\n result.append({'Model':model, 'Y':y, 'R2':r2[i], 'MSE':mse[i], 'AIC':aic[i], 'BIC':bic[i]})\n\ndef divide_by_date(args):\n '''divide data into k-fold without overlapping dates in training and testing sets'''\n import datetime\n from pytz import timezone, utc\n from sklearn.cross_validation import KFold\n\n datefile = pd.read_csv(args.bydate, sep=args.separator, header=None, usecols=[args.datecol], skipinitialspace=True).dropna(axis=1, how='all')\n arr = datefile.iloc[:, 0].as_matrix()\n tz_user = args.timezone\n if tz_user.lower() in TZ_DICT.keys():\n mkt_tz = timezone(TZ_DICT[tz_user.lower()])\n else:\n try:\n mkt_tz = timezone(tz_user)\n except Exception:\n print('timezone {0} cannot be recognized'.format(tz_user))\n raise\n dates = [datetime.datetime.utcfromtimestamp(x).replace(tzinfo=utc).astimezone(mkt_tz).date() for x in arr]\n dates = np.array(dates)\n uniq_dt = uniq(dates)\n ind = np.arange(len(dates))\n\n if args.kfold > len(uniq_dt):\n raise Exception('Number of folds is greater than the number of unique dates {0}'.format(len(uniq_dt)))\n\n _kf = KFold(len(uniq_dt), n_folds=args.kfold, shuffle=True)\n kf = []\n for _train, _test in _kf:\n train, test = [], []\n for i in _train:\n train.append(list(ind[dates == uniq_dt[i]]))\n for j in _test:\n test.append(list(ind[dates == uniq_dt[j]]))\n train = np.array(sum(train,[]))\n test = np.array(sum(test,[]))\n kf.append((train, test))\n\n return kf\n\ndef get_result_k(result, X_train_t, X_test_t, Y_train, Y_test, batch, model, idx):\n '''compute and append k-fold result to a list'''\n x_train_t, x_test_t = X_train_t[:, idx], X_test_t[:, idx]\n r2, mse, aic, bic, dim = get_measures(x_train_t, Y_train, x_test_t, Y_test, normalize=False)\n for i in range(dim):\n y = 'Y'+str(i)\n result.append({'batch':batch, 'model':model, 'Y':y, '0':r2[i], '1':mse[i], 
'2':aic[i], '3':bic[i]})\n\n# helper functions end\n\ndef get_stats(df, dft, args, df_size):\n '''compute statistics of regressions using subset features'''\n\n avail_mem = psutil.virtual_memory()[1] # system memory available\n usage_per_proc = df_size * 3 # empirical estimate from testing: ~2.5\n num_procs = avail_mem / usage_per_proc\n if num_procs < 1:\n raise Exception('not enough memory to start a process')\n half_cpus = multiprocessing.cpu_count()/2\n num_procs = np.min([num_procs, half_cpus]) \n \n # extract signal and instrument names from desc\n if args.header:\n keyname = df.columns.values\n sig = [l.split(':')[0] for l in keyname]\n instr = [l.split(':')[1] for l in keyname]\n else:\n sig, instr = parse_desc(args)\n \n # add Sig_ prefix so that plotsubstats can distinguish sig feature from instr \n for i, s in enumerate(sig):\n if s[:3].lower() != 'sig':\n sig[i] = 'Sig_' + s\n\n uniq_sig = uniq(sig)\n uniq_instr = uniq(instr)\n X, Y = np.array(df), np.array(dft) # sklearn works with np arrays\n instr = np.array(instr)\n sig = np.array(sig)\n\n # initialize multiprocessing manager\n manager = Manager()\n process_list = []\n\n base_idx = slice_df(instr, uniq_instr[0], exclude=False)\n\n # without cross validation\n if args.kfold == 0:\n if args.additive: # additive/forward selection of instruments\n temp_list = manager.list()\n for i in uniq_instr:\n if i == uniq_instr[0]:\n modelname = i\n idx = base_idx\n else:\n modelname = '+' + i\n idx = slice_df(instr, i, exclude=False)\n idx = np.concatenate((idx, base_idx))\n temp_process = Process(target=get_result, args=(temp_list, X, Y, idx, modelname))\n process_list.append(temp_process)\n\n else: # backward selection\n r2, mse, aic, bic, dim = get_measures(X, Y)\n stats = []\n for i in range(dim):\n stats.append({'Model':'Full', 'Y':'Y'+str(i), 'R2':r2[i], 'MSE':mse[i], 'AIC':aic[i], 'BIC':bic[i]})\n temp_list = manager.list(stats)\n\n # by instruments\n if args.type.lower() == 'instr':\n for i in uniq_instr:\n idx = slice_df(instr, i)\n temp_process = Process(target=get_result, args=(temp_list, X, Y, idx, i))\n process_list.append(temp_process)\n\n # by signals\n elif args.type.lower() == 'sig':\n for i in uniq_sig:\n idx = slice_df(sig, i)\n temp_process = Process(target=get_result, args=(temp_list, X, Y, idx, i))\n process_list.append(temp_process)\n \n # by sig/instr interaction pairs\n elif args.type.lower() == 'both':\n for i in uniq_instr:\n for s in uniq_sig:\n idx_i = slice_df(instr, i, exclude=False)\n idx_s = slice_df(sig, s, exclude=False)\n idx = np.nonzero(np.in1d(idx_i, idx_s))[0] # common idx\n if idx.size > 0:\n idx = np.setdiff1d(np.arange(df.shape[1]), idx, assume_unique=True)\n temp_process = Process(target=get_result, args=(temp_list, X, Y, idx, i+':'+s))\n process_list.append(temp_process)\n\n b = lib.batcher.ProcessBatcher(process_list, num_procs)\n b.run()\n \n results = list(temp_list)\n stats_list = []\n for i in range(dft.shape[1]):\n stats_list.append([])\n for d in results: \n idx = int(d['Y'][1]) \n stats_list[idx].append(d)\n for i, s in enumerate(stats_list):\n stats_list[i] = pd.DataFrame(s)\n \n # k-fold cross validation\n else:\n if args.bydate: # split data into k-fold sets with non-overlapping dates\n kf = divide_by_date(args)\n else:\n from sklearn.cross_validation import KFold\n kf = KFold(df.shape[0], n_folds=args.kfold, shuffle=True)\n\n temp_list = manager.list()\n cnt = 0\n for train, test in kf:\n X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]\n scaler = 
preprocessing.StandardScaler().fit(X_train)\n X_train_t = scaler.transform(X_train) # standardize training examples\n X_test_t = scaler.transform(X_test) # standardize testing examples with the same scaler\n\n if args.additive:\n for i in uniq_instr:\n if i == uniq_instr[0]:\n modelname = i\n idx = base_idx\n else:\n modelname = '+' + i\n idx = slice_df(instr, i, exclude=False)\n idx = np.concatenate((idx, base_idx))\n temp_process = Process(target=get_result_k, args=(temp_list, X_train_t, X_test_t, Y_train, Y_test, cnt, modelname, idx))\n process_list.append(temp_process)\n else:\n # full model\n temp_process = Process(target=get_result_k, args=(temp_list, X_train_t, X_test_t, Y_train, Y_test, cnt, 'Full', np.arange(df.shape[1])))\n process_list.append(temp_process)\n \n # by instruments\n if args.type.lower() == 'instr':\n for i in uniq_instr:\n idx = slice_df(instr, i)\n temp_process = Process(target=get_result_k, args=(temp_list, X_train_t, X_test_t, Y_train, Y_test, cnt, i, idx))\n process_list.append(temp_process)\n \n # by signals\n elif args.type.lower() == 'sig':\n for i in uniq_sig:\n idx = slice_df(sig, i)\n temp_process = Process(target=get_result_k, args=(temp_list, X_train_t, X_test_t, Y_train, Y_test, cnt, i, idx))\n process_list.append(temp_process)\n \n # by sig/instr interaction pair\n elif args.type.lower() == 'both':\n pairs = []\n for i in uniq_instr:\n for s in uniq_sig:\n pair = i + ':' + s\n pairs.append(pair)\n idx_i = slice_df(instr, i, exclude=False)\n idx_s = slice_df(sig, s, exclude=False)\n idx = np.nonzero(np.in1d(idx_i, idx_s))[0] # common idx\n if idx.size > 0:\n idx = np.setdiff1d(np.arange(df.shape[1]), idx, assume_unique=True)\n temp_process = Process(target=get_result_k, args=(temp_list, X_train_t, X_test_t, Y_train, Y_test, cnt, pair, idx))\n process_list.append(temp_process)\n cnt += 1\n\n b = lib.batcher.ProcessBatcher(process_list, num_procs)\n b.run()\n\n # compile results into d and take the average\n base = uniq_instr[0] if args.additive else 'Full'\n if args.additive:\n add_instr = ['+' + i for i in uniq_instr[1:]] \n add_sig = []\n elif args.type.lower() == 'both':\n add_instr = pairs \n add_sig = []\n else:\n add_instr = uniq_instr\n add_sig = uniq_sig\n \n results = list(temp_list)\n d = dict.fromkeys([base] + add_instr + add_sig)\n \n for k in d.keys():\n d[k] = np.zeros((args.kfold, 4)) # k batches (rows) by 4 stats (columns) \n\n stats_tmp = []\n for i in range(dft.shape[1]):\n stats_tmp.append(copy.deepcopy(d))\n for dic in results:\n idx = int(dic['Y'][1])\n dd = stats_tmp[idx]\n model = dic['model']\n b = dic['batch']\n for i in range(4):\n dd[model][b, i] = dic[str(i)]\n\n stats_list = []\n for i, s in enumerate(stats_tmp):\n stats = []\n for k in d.keys():\n arr = s[k].mean(axis=0)\n stats.append({'Model':k, 'Y':'Y'+str(i), 'R2':arr[0], 'MSE':arr[1], 'AIC':arr[2], 'BIC':arr[3]})\n stats = pd.DataFrame(stats)\n stats = stats.loc[~(stats[['AIC','BIC','MSE','R2']] == 0).all(axis=1)]\n stats_list.append(stats)\n \n # sort results in the order of full/baseline, instr, and sig\n stats = stats_list[0]\n base_b = stats['Model'] == uniq_instr[0] if args.additive else stats['Model'] == 'Full'\n sig_b = base_b.copy()\n for i, name in stats['Model'].iteritems():\n if name in uniq_sig:\n sig_b.loc[i] = True\n else:\n sig_b.loc[i] = False\n\n for i, s in enumerate(stats_list):\n base = s[base_b]\n sigs = s[sig_b]\n instrs = s[~sig_b & ~base_b]\n \n sort_value = getattr(df, \"sort_values\", None)\n if callable(sort_value):\n sigs = 
sigs.sort_values(by='Model')\n            instrs = instrs.sort_values(by='Model')\n        else:\n            sigs = sigs.sort(columns='Model')\n            instrs = instrs.sort(columns='Model')\n        \n        s_tmp = pd.concat([base, instrs, sigs])\n        s_tmp.AIC = s_tmp.AIC - base.AIC.values[0]\n        s_tmp.BIC = s_tmp.BIC - base.BIC.values[0]\n        s_tmp = s_tmp.set_index('Model') \n        stats_list[i] = s_tmp\n    \n    return stats_list\n\n\ndef main(argv):\n    args = get_args(argv)\n    signal = args.sigfile\n    target = args.targfile\n    df, dft, df_size = get_dataframes(signal, target, args)\n    stats_list = get_stats(df, dft, args, df_size)\n\n    formatter = \"%.{0}f\".format(args.num_decimals)\n    for i, stats in enumerate(stats_list):\n        print(stats.to_string(float_format=lambda float_val: formatter % (float_val)),'\\n')\n        if args.to_csv:\n            stats.to_csv('Y'+str(i)+'.'+args.to_csv)\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])","sub_path":"contribution/regsubset.py","file_name":"regsubset.py","file_ext":"py","file_size_in_byte":18932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"263296073","text":"#!/usr/bin/env python\n# coding=utf-8\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\n# sopel imports\nfrom sopel import module\n\n# imports for system and OS access, directories\nimport os\nimport sys\nimport netifaces\n\n\n# imports based on THIS file\nmoduledir = os.path.dirname(__file__)\nshareddir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\nsys.path.append(shareddir)\nfrom BotShared import *\n\n\n\n\n\n\n\n# Start listener on welcome RPL, which should only ever be received once\n@module.event('001')\n@module.rule('.*')\n@module.thread(True)\ndef bot_startup_ip_addr(bot, trigger):\n\n    if bot_startup_requirements_met(bot, [\"ip_address\"]):\n        return\n\n    # don't run jobs if not ready\n    while not bot_startup_requirements_met(bot, [\"botdict\"]):\n        pass\n\n    bot.memory[\"botdict\"][\"tempvals\"]['networking'] = dict()\n\n    bot.memory[\"botdict\"][\"tempvals\"]['networking']['interfaces'] = netifaces.interfaces()\n\n    bot.memory[\"botdict\"][\"tempvals\"]['networking']['ip_addresses'] = []\n\n    for i in bot.memory[\"botdict\"][\"tempvals\"]['networking']['interfaces']:\n        if i == 'lo':\n            continue\n        iface = netifaces.ifaddresses(i).get(netifaces.AF_INET)\n        if iface:\n            for j in iface:\n                bot.memory[\"botdict\"][\"tempvals\"]['networking']['ip_addresses'].append(str(j['addr']))\n\n    bot_startup_requirements_set(bot, \"ip_address\")\n","sub_path":"Modules/BotCore/Startup/Bot_IP_Addresses.py","file_name":"Bot_IP_Addresses.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"423602425","text":"done = False # establish variables\nlines = list()\n\nwhile not done: # get user input until error occurs\n    try:\n        userInput = input(\"Enter a string: \") # read input from user\n        lines.append(userInput)\n    except EOFError as error:\n        done = True # prevent further input\n        lines.reverse() # print lines in reverse\n        for line in lines:\n            print(line)","sub_path":"HW1/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"148582673","text":"import pickle\nimport os\nimport goldsberry\nfrom playerScripts import NBAPlayer\nfrom playerScripts import NoSuchPlayer\nfrom gameScripts import NBAGames\nfrom utilsScripts import join_advanced_lineup_dicts, is_lineup_valid, pickles_folder_path\n\nteams_id_dict_pickle_path = os.path.join(pickles_folder_path, \"nba_teams_numbers_dict.pickle\")\nwith open(teams_id_dict_pickle_path, 'rb') as file1:\n    teams_id_dict = pickle.load(file1)\n    \"\"\":type : dict\"\"\"\n\nnba_teams_all_shooters_lineups_dicts_path_regex = os.path.join(pickles_folder_path,\n                                                               'nba_teams_all_shooters_lineups_dicts_{season}.pickle')\n\n\nclass NBATeam(object):\n    def __init__(self, team_name_or_id, season='2015', initialize_stat_classes=True):\n        \"\"\"\n\n        :rtype : An NBA team object\n        \"\"\"\n        if type(team_name_or_id) is int:\n            self.team_id = team_name_or_id\n        elif type(team_name_or_id) is str:\n            self.team_id = teams_id_dict[team_name_or_id]\n        else:\n            raise Exception('Constructor only receives string or integer')\n        self.season = season\n        if initialize_stat_classes:\n            for stat_class in filter(lambda x: not x.startswith('_'), dir(goldsberry.team)):\n                stat_class_function = getattr(goldsberry.team, stat_class)\n                if 'season' in stat_class_function.__init__.func_code.co_varnames:\n                    setattr(self, stat_class, stat_class_function(team_id=self.team_id, season=self.season))\n                else:\n                    setattr(self, stat_class, stat_class_function(team_id=self.team_id))\n        self.games_summary_dicts = NBAGames(season).get_specific_team_games(self.team_id)\n        self.players_objects_list = []\n        \"\"\":type : list[NBAPlayer]\"\"\"\n\n    def __repr__(self):\n        teams_name_dict = {v: k for k, v in teams_id_dict.items()}\n        return \"{team_name} Object\".format(team_name=teams_name_dict[self.team_id])\n\n    def initialize_players_objects(self, initialize_stat_classes=True, override=False):\n        \"\"\"\n        Fills (if empty) member 'players_objects_list' of class with a list of player objects for players on the roster\n        \"\"\"\n        if not override and self.players_objects_list:\n            raise Exception('Players_objects_list object is already set')\n        else:\n            self.players_objects_list = []\n            for player_dict in goldsberry.team.roster(team_id=self.team_id, season=self.season).players():\n                try:\n                    nba_player_object = NBAPlayer(PERSON_ID=player_dict['PLAYER_ID'],\n                                                  season=self.season,\n                                                  initialize_stat_classes=initialize_stat_classes)\n                    self.players_objects_list.append(nba_player_object)\n                except NoSuchPlayer:\n                    print (\n                        \"{player_name} was not found in the league's players, even though he's on the team roster\".format(\n                            player_name=player_dict['PLAYER']))\n                except Exception as e:\n                    self.players_objects_list = []\n                    raise e\n\n    def get_filtered_lineup_dicts(self, white_list=None, black_list=None):\n        \"\"\"\n\n        :param white_list: player objects white list\n        :type white_list: list[NBAPlayer]\n        :param black_list: player objects black list\n        :type black_list: list[NBAPlayer]\n        :return: Filtered dict based on the parameters given\n        :rtype: list[dict]\n        \"\"\"\n        if not white_list:\n            white_list = []\n        if not black_list:\n            black_list = []\n\n        return filter(lambda lineup_dict: is_lineup_valid(lineup_dict, white_list, black_list), self.lineups.lineups())\n\n    def get_all_shooters_lineup_dicts(self, attempts_limit=20):\n        if not self.players_objects_list:\n            self.initialize_players_objects()\n\n        only_non_shooters_player_objects = [player_object for player_object in self.players_objects_list\n                                            if not player_object.is_three_point_shooter(attempts_limit=attempts_limit)]\n        all_shooters_lineup_dicts = self.get_filtered_lineup_dicts(black_list=only_non_shooters_player_objects)\n        return all_shooters_lineup_dicts\n\n\nif __name__ == \"__main__\":\n    # suns = NBATeam('suns')\n    # only_shooters_suns_lineups = suns.get_all_shooters_lineup_dicts()\n    
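# (illustrative) get_filtered_lineup_dicts also takes explicit white/black player lists, e.g.:\n    # suns.get_filtered_lineup_dicts(black_list=suns.players_objects_list[:1])\n    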
# suns_all_shooters_advanced_stats = join_advanced_lineup_dicts(only_shooters_suns_lineups)\n #\n # bobcats = NBATeam('bobcats')\n # only_shooters_bobcats_lineups = bobcats.get_all_shooters_lineup_dicts()\n # bobcats_all_shooters_advanced_stats = join_advanced_lineup_dicts(only_shooters_bobcats_lineups)\n my_season = 2015\n league_dict = {}\n for team_name, team_id in teams_id_dict.items():\n team_object = NBATeam(team_id)\n only_shooters_team_lineups = team_object.get_all_shooters_lineup_dicts()\n team_all_shooters_advanced_stats = join_advanced_lineup_dicts(only_shooters_team_lineups)\n league_dict[team_name] = (only_shooters_team_lineups, team_all_shooters_advanced_stats)\n with open(nba_teams_all_shooters_lineups_dicts_path_regex.format(season=my_season), 'wb') as file1:\n pickle.dump(league_dict, file1)\n","sub_path":"teamScripts.py","file_name":"teamScripts.py","file_ext":"py","file_size_in_byte":5359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"253018801","text":"#!/usr/bin/env python3\n\n\"\"\"\n Syllabification script\n\n Copyright (C) 2016 Elie Roux\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of\n this software and associated documentation files (the \"Software\"), to deal in\n the Software without restriction, including without limitation the rights to\n use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n of the Software, and to permit persons to whom the Software is furnished to do\n so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n SOFTWARE.\n\n Depends on pyphen: http://pyphen.org/\n You also need the hyph_la_liturgical.dic file. 
To get it, get the\n    hyphen-la project on https://github.com/gregorio-project/hyphen-la\n    and run \"make\" in the \"patterns\" directory.\n\n\"\"\"\n\nimport pyphen\nimport argparse\nimport sys\nimport re\n\nparser = argparse.ArgumentParser(\n \t\t\t\tdescription='A script to \"syllabify\" (insert a character between all syllables) a file.',formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('-t', '--text-type',\n                    help='text type (chant or prose)',\n                    default='chant', dest='type')\nparser.add_argument('-m', '--hyphen-mode',\n                    help='Hyphenation mode (liturgical, phonetic or etymology)',\n                    default='liturgical', dest='mode')\nparser.add_argument('-i', '--input', nargs='?', type = argparse.FileType('r'),\n\t\t\t\t\tdefault=sys.stdin, dest='inputfile')\nparser.add_argument('-o', '--output-file', nargs='?', type=argparse.FileType('w'),\n                    default=sys.stdout, dest='outputfile')\nparser.add_argument('-c', '--hyphen-char', nargs='?',\n                    default='-', dest='hyphenchar')\n\nargs = parser.parse_args()\n\nrighthyphenmin = 2\nlefthyphenmin = 2\ncutvowels = False\nif (args.type == 'chant'):\n\trighthyphenmin=1\n\tlefthyphenmin=1\n\tcutvowels = True\n\nhyphenator = pyphen.Pyphen(filename='../patterns/hyph_la_'+args.mode+'.dic',left=lefthyphenmin,right=righthyphenmin)\n\ndef hyphenate_one_word(word):\n\tglobal hyphenator,args\n\treturn hyphenator.inserted(word,args.hyphenchar)\n\nwordregex = re.compile(r'\\b[^\\W\\d_]+\\b')\n\nfor line in args.inputfile:\n\tline = line.strip()\n\thyphenline = wordregex.sub(lambda match: hyphenate_one_word(match.group(0)), line)\n\targs.outputfile.write(hyphenline+'\\n')\n\nargs.inputfile.close()\nargs.outputfile.close()\n","sub_path":"syllabifier/syllabify.py","file_name":"syllabify.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"448245481","text":"# -*- coding: utf-8 -*-\n\nimport requests\nimport json\nimport time\nfrom lxml import etree\nimport html\nimport re\nfrom bs4 import BeautifulSoup\n\n\nclass Weibospider:\n    def __init__(self):\n        # get info about the home page:\n        self.start_url = 'https://weibo.com/cctvxinwen?profile_ftype=1&is_all=1#_0'\n        self.domain=\"100406\"\n        self.uuid=\"1537790411\"\n        self.headers = {\n            \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n            \"accept-encoding\": \"gzip, deflate, br\",\n            \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n            \"cache-control\": \"max-age=0\",\n            \"cookie\": \"UM_distinctid=16ebb2a69e87c9-0f9e77de80db66-7373e61-1fa400-16ebb2a69e946c; SINAGLOBAL=9078084295180.92.1575098149469; wb_timefeed_6690784751=1; un=17603072726; ALF=1616915411; SCF=AudfR6nptB5k5GPk2ngKX-Z921MsZz1mk6ZCW1y-e2Dcf2L-kfpApyRwSSfnZR7PY-jnJZLjvyww2feOwq079SA.; SUHB=0y0ZcWLCI9543c; ULV=1585379421606:3:3:3:7266807228602.845.1585379421548:1585370401285; UOR=www.baidu.com,vdisk.weibo.com,www.baidu.com; YF-Page-G0=8a1a69dc6ba21f1cd10b039dff0f4381|1585497087|1585497087; SUB=_2AkMp3Es3f8NxqwJRmfoTzG_lbY5yzwvEieKfgLrsJRMxHRl-yT92qnQEtRB6Alxl2FqAT8K5PGjwZBGo92zjhCu5p8nO; SUBP=0033WrSXqPxfM72-Ws9jqgMF55529P9D9W5HWPXBcydOZ1KeChIjlHJj\", # fill in your own cookie here\n            \"referer\": \"https://www.weibo.com/u/\"+self.uuid+\"?topnav=1&wvr=6&topsug=1\",\n            \"upgrade-insecure-requests\": \"1\",\n            \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36\",\n        }\n    \n\n    def parse_home_url(self, url): # parse the details of the home page (excluding the two pages fetched via ajax)\n        res = requests.get(url, headers=self.headers)\n        response = res.content.decode().replace(\"\\\\\", \"\")\n        every_id = re.compile('name=(\\d+)', re.S).findall(response) # ids needed for the secondary pages\n        every_link = re.compile('', re.S).findall(response) \n        home_url = []\n        for id in every_id:\n            base_url = 'https://weibo.com/aj/v6/comment/big?ajwvr=6&id={}&from=singleWeiBo'\n            url = base_url.format(id)\n            home_url.append(url)\n        return home_url\n\n    def parse_comment_info(self, url): # scrape info about the users who commented directly (name,info,time,info_url)\n        res = requests.get(url, headers=self.headers)\n        response = res.json()\n        count = response['data']['count']\n        html = etree.HTML(response['data']['html'])\n        name = html.xpath(\n            \"//div[@class='list_li S_line1 clearfix']/div[@class='WB_face W_fl']/a/img/@alt\") # commenter's name\n        info = html.xpath(\n            \"//div[@node-type='replywrap']/div[@class='WB_text']/text()\") # comment text\n        info = \"\".join(info).replace(\" \", \"\").split(\"\\n\")\n        info.pop(0)\n        comment_time = html.xpath(\n            \"//div[@class='WB_from S_txt2']/text()\") # comment time\n        name_url = html.xpath(\n            \"//div[@class='WB_face W_fl']/a/@href\") # commenter's url\n        name_url = [\"https:\" + i for i in name_url]\n        comment_info_list = []\n        for i in range(len(name)):\n            item = {}\n            item[\"name\"] = name[i] # store the commenter's screen name\n            item[\"comment_info\"] = info[i] # store the comment text\n            item[\"comment_time\"] = comment_time[i] # store the comment time\n            item[\"comment_url\"] = name_url[i] # store the commenter's profile page\n            comment_info_list.append(item)\n        return count, comment_info_list\n\n    def write_file(self, path_name, content_list):\n        for content in content_list:\n            with open(path_name, \"a\", encoding=\"UTF-8\") as f:\n                f.write(json.dumps(content, ensure_ascii=False))\n                f.write(\"\\n\")\n\n    def run(self):\n        start_url = 'https://weibo.com/u/'+self.uuid+'?page={}&is_all=1'\n        start_ajax_url1 = 'https://weibo.com/p/aj/v6/mblog/mbloglist?ajwvr=6&domain='+self.domain+'&is_all=1&page={0}&pagebar=0&pl_name=Pl_Official_MyProfileFeed__20&id='+self.domain+''+self.uuid+'&script_uri=/u/'+self.uuid+'&pre_page={0}'\n        start_ajax_url2 = 'https://weibo.com/p/aj/v6/mblog/mbloglist?ajwvr=6&domain='+self.domain+'&is_all=1&page={0}&pagebar=1&pl_name=Pl_Official_MyProfileFeed__20&id='+self.domain+''+self.uuid+'&script_uri=/u/'+self.uuid+'&pre_page={0}'\n        for i in range(12): # there are 12 pages of posts in total\n            home_url = self.parse_home_url(start_url.format(i + 1)) # posts on each page\n            ajax_url1 = self.parse_home_url(\n                start_ajax_url1.format(i + 1)) # posts on the first ajax-loaded page\n            ajax_url2 = self.parse_home_url(\n                start_ajax_url2.format(i + 1)) # posts on the second ajax-loaded page\n            all_url = home_url + ajax_url1 + ajax_url2\n            print(home_url)\n            print(ajax_url1)\n            print(ajax_url2)\n            print(all_url)\n            for j in range(len(all_url)):\n                # print(all_url[j])\n                path_name = \"cctvxinwen_post_{}_comments.txt\".format(i * 45 + j + 1)\n                all_count, comment_info_list = self.parse_comment_info(\n                    all_url[j])\n                self.write_file(path_name, comment_info_list)\n                for num in range(1, 10000):\n                    if num * 15 < int(all_count) + 15:\n                        comment_url = all_url[j] + \"&page={}\".format(num + 1)\n                        print(comment_url)\n                        try:\n                            count, comment_info_list = self.parse_comment_info(\n                                comment_url)\n                            self.write_file(path_name, comment_info_list)\n                        except Exception as e:\n                            print(\"Error:\", e)\n                            time.sleep(60)\n                            count, comment_info_list = self.parse_comment_info(\n                                comment_url)\n                            self.write_file(path_name, comment_info_list)\n                        del count\n                        time.sleep(5)\n\n                print(\"Finished fetching info for post {}!\".format(i * 45 + j + 1))\n\n\nif __name__ == '__main__':\n    weibo = Weibospider()\n    
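# run() crawls all 12 pages of the account and writes one comments file per post\n    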
weibo.run()\n","sub_path":"cctvxinwen.py","file_name":"cctvxinwen.py","file_ext":"py","file_size_in_byte":6396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"560164747","text":"import numpy as np\nimport pandas as pd\n\n'''\nget_choice --- get a potential set of child node choice\n the format of element in choice refers to 'itemset' in forecast.py\n'''\n\n\ndef get_choice(forecast, itemset_column):\n # forecast ------ dataframe of forecast\n # itemset_column ------ the dimension of cuboid to be considered (like ['i','e',0,0,0])\n choice = []\n index_ite = []\n columns = forecast.columns\n for i in range(len(itemset_column)):\n if itemset_column[i] != 0:\n index_ite.append(i)\n for i in range(len(forecast)):\n tempitem = [0] * len(itemset_column)\n for j in index_ite:\n tempitem[j] = forecast.iloc[i][columns[j]]\n if tempitem not in choice:\n choice.append(tempitem)\n return choice\n","sub_path":"Code/hotspot/hotspot/choice.py","file_name":"choice.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"317350417","text":"import sys\nfrom iterutil import peekiter\nfrom lisptypes import SYMBOL, CONS, NIL\nfrom dictutil import register\nfrom debuggery import log\n\n__all__ = [\n 'READ',\n 'read_table',\n]\n\nreadtable = {\n}\n\ndef READ(end=''):\n \"\"\" Reads a string and returns a parsed Lisp value.\n \"\"\"\n #. Eat leading spaces.\n skip_spaces()\n c = next(stream)\n log(\"C\", c)\n if c in end:\n return None\n elif c in readtable:\n val = readtable[c]()\n else: #Anything else.\n #stream.putback(c)\n val = read_SYMBOL(c, end)\n if val is None:\n return READ(end) #try again.\n return val\n\n\ndef skip_spaces():\n while stream.peek().isspace():\n next(stream)\n\n\ndef read_SYMBOL(s, end):\n \"\"\" Read until space or something.\n\n s is what's already been read.\n Numbers are symbols, too.\n \"\"\"\n log('read_SYMBOL', s, end)\n for c in stream:\n #! Be careful about reading too far.\n if c.isspace():\n break\n if c in end:\n stream.putback(c)\n break\n s += c\n log(\"SYMBOL\", s)\n try: return int(s)\n except ValueError: pass\n try: return float(s)\n except ValueError: pass\n return SYMBOL(s)\n\n\n@register(readtable, \"(\")\ndef read_LIST():\n log(\"read_LIST\")\n # a list is a cons cell chain\n val = READ(')')\n if val is None:\n return NIL\n elif val == '.':\n return READ(')') #expects a ')' right after.\n #? 
How should I specify \"expects this after\"?\n    else:\n        return CONS(val, read_LIST())\n\n\n@register(readtable, \"'\")\ndef read_QUOTE():\n    return CONS(SYMBOL('QUOTE'), CONS(READ(), NIL))\n\n\n@register(readtable, \";\")\ndef read_COMMENT():\n    for c in stream:\n        if c == '\\n':\n            break\n    return\n\n\n@register(readtable, '\"')\ndef read_STRING():\n    s = ''\n    for c in stream:\n        if c == '\"':\n            break\n        s += c\n    return s\n\n\n@register(readtable, '#')\ndef read_POUND():\n    ...\n\n\ndef istream(file):\n    for line in file:\n        yield from line\n\n\ndef refresh(file=None):\n    global stream\n    stream = peekiter(istream(file or sys.stdin))\n\nrefresh()\n\n\n","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"247684185","text":"from django.urls import path, include\nfrom .views import *\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n    path('', index, name='index'),\n    path('signup/', SignUpView.as_view(), name='signup'),\n    path('accounts/', include('django.contrib.auth.urls')),\n    path('add_statement/', StatementCreateView.as_view(), name='add_statement'),\n    path('my_statements/', MyStatsList.as_view(), name='my_statements'),\n    path('about_stat/', StatDetailView.as_view(), name='about_stat'),\n    path('accept_stat/', StatUpdateView.as_view(), name='accept_stat'),\n    path('unprocessed/', UnprocessedStatList.as_view(), name='unprocessed_stat'),\n    path('delete_stat/', delete_stat, name='delete_stat'),\n    path('all_statements/', AllStatementsList.as_view(), name='all_statements'),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"django3/registr_applications/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"454821433","text":"\n\nfrom xai.brain.wordbase.verbs._embed import _EMBED\n\n#class header\nclass _EMBEDDING(_EMBED, ):\n\tdef __init__(self,): \n\t\t_EMBED.__init__(self)\n\t\tself.name = \"EMBEDDING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"embed\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_embedding.py","file_name":"_embedding.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"303576999","text":"#\n# Example file for the math module\n#\n\n\n# import the module\nfrom math import *\n\n# using the constants (pi) and the functions (sqrt, sin...)\nnombre = 121\nangle = pi/6 # i.e. 30°\n\nprint(\"racine carrée de\", nombre, \"=\", sqrt(nombre))\nprint(\"sinus de\", angle, \"radians\", \"=\", sin(angle))\n\n","sub_path":"Module en Python/math_finished.py","file_name":"math_finished.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"98237769","text":"import sys\nimport argparse\nimport yaml\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport torchlight\nfrom torchlight import str2bool\nfrom torchlight import DictAction\nfrom torchlight import import_class\n\nfrom .io import IO\nfrom .data_tools import *\n# import data_tools as tools\n\n\nclass Processor(IO):\n\n    def __init__(self, argv=None):\n\n        
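        # Setup pipeline, in order: parse the CLI/config arguments, initialise
        # the environment (result/info dicts, logging), build the model,
        # restore pretrained weights, move everything to the GPU, then load
        # the data and the optimizer. Subclasses override the load_* hooks.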
self.load_arg(argv)\n self.init_environment()\n self.load_model()\n self.load_weights()\n self.gpu()\n self.load_data()\n self.load_optimizer()\n\n def init_environment(self):\n super().init_environment()\n self.result = dict()\n self.iter_info = dict()\n self.epoch_info = dict()\n self.meta_info = dict(epoch=0, iter=0)\n\n\n def load_optimizer(self):\n pass\n\n\n def load_data(self):\n if self.arg.debug==True:\n self.actions = define_actions('walking')\n else:\n self.actions = define_actions(self.arg.actions)\n self.train_dict, self.complete_train = load_data(self.arg.train_dir, self.actions)\n self.test_dict, self.complete_test = load_data(self.arg.test_dir, self.actions)\n self.data_mean, self.data_std, self.dim_ignore, self.dim_use, self.dim_zero, self.dim_nonzero = normalization_stats(self.complete_train)\n\n\n def show_epoch_info(self):\n for k, v in self.epoch_info.items():\n self.io.print_log('\\t{}: {}'.format(k, v))\n if self.arg.pavi_log:\n self.io.log('train', self.meta_info['iter'], self.epoch_info)\n\n\n def show_iter_info(self):\n if self.meta_info['iter'] % self.arg.log_interval == 0:\n info ='\\tIter {} Done.'.format(self.meta_info['iter'])\n for k, v in self.iter_info.items():\n if isinstance(v, float):\n info = info + ' | {}: {:.4f}'.format(k, v)\n else:\n info = info + ' | {}: {}'.format(k, v)\n self.io.print_log(info)\n\n if self.arg.pavi_log:\n self.io.log('train', self.meta_info['iter'], self.iter_info)\n\n\n def train(self):\n for _ in range(100):\n self.iter_info['loss'] = 0\n self.show_iter_info()\n self.meta_info['iter'] += 1\n self.epoch_info['mean loss'] = 0\n self.show_epoch_info()\n\n\n def test(self):\n for _ in range(100):\n self.iter_info['loss'] = 1\n self.show_iter_info()\n self.epoch_info['mean loss'] = 1\n self.show_epoch_info()\n\n\n def start(self):\n self.io.print_log('Parameters:\\n{}\\n'.format(str(vars(self.arg))))\n\n if self.arg.phase == 'train':\n self.MAE_tensor = np.zeros((self.arg.iter_num//self.arg.eval_interval, 8, 13))\n self.mask = torch.ones(25).to(self.dev)\n self.mask[10:] = 2\n for itr in range(self.arg.iter_num):\n self.train()\n if ((itr+1) % self.arg.save_interval==0) or (itr+1==self.arg.iter_num):\n filename = 'iter{}_model.pt'.format(itr+1)\n self.io.save_model(self.model, filename)\n if ((itr+1) % self.arg.eval_interval==0) or (itr+1==self.arg.iter_num):\n if (itr+1) % self.arg.savemotion_interval ==0:\n save_motion = True\n else:\n save_motion = False\n self.io.print_log('eval Iteration: {}'.format(itr+1))\n self.test(iter_time=itr//self.arg.eval_interval, save_motion=save_motion)\n self.MAE = self.MAE_tensor.min(axis=0)\n self.MAE[:,-1] = self.MAE.mean(axis=-1)*13/10.\n\n print_str = \"{0: <16} |\".format(\"milliseconds\")\n for ms in [40, 80, 120, 160, 200, 240, 280, 320, 360, 400, 560, 1000]:\n print_str = print_str + \" {0:5d} |\".format(ms)\n self.io.print_log(print_str)\n for idx, action in enumerate(self.actions):\n print_str = \"{0: <16} |\".format(action)\n for ms_idx, ms in enumerate([0,1,2,3,4,5,6,7,8,9,10,11]):\n if self.arg.target_seq_len >= ms+1:\n print_str = print_str + \" {0:.3f} |\".format(self.MAE[idx, ms])\n else:\n print_str = print_str + \" n/a |\"\n self.io.print_log(print_str)\n\n # for act_num in range(8):\n # print_str = str(self.MAE[act_num])\n # self.io.print_log(print_str)\n\n elif self.arg.phase == 'test':\n if self.arg.weights is None:\n raise ValueError('Please appoint --weights.')\n self.io.print_log('Model: {}.'.format(self.arg.model))\n self.io.print_log('Weights: 
{}.'.format(self.arg.weights))\n self.io.print_log('Evaluation Start:')\n self.test(phase=True)\n\n\n @staticmethod\n def get_parser(add_help=False):\n\n parser = argparse.ArgumentParser( add_help=add_help, description='Base Processor')\n\n parser.add_argument('-w', '--work_dir', default='./work_dir/tmp', help='the work folder for storing results')\n parser.add_argument('-c', '--config', default=None, help='path to the configuration file')\n\n # processor\n parser.add_argument('--phase', default='train', help='must be train or test')\n parser.add_argument('--save_result', type=str2bool, default=False, help='if ture, the output of the model will be stored')\n parser.add_argument('--iter_num', type=int, default=10000, help='stop training in which iteration')\n parser.add_argument('--use_gpu', type=str2bool, default=True, help='use GPUs or not')\n parser.add_argument('--device', type=int, default=0, nargs='+', help='the indexes of GPUs for training or testing')\n\n # visulize and debug\n parser.add_argument('--log_interval', type=int, default=100, help='the interval for printing training messages (#iteration)')\n parser.add_argument('--save_interval', type=int, default=500, help='the interval for storing models (#iteration)')\n parser.add_argument('--eval_interval', type=int, default=500, help='the interval for evaluating models (#iteration)')\n parser.add_argument('--savemotion_interval', type=int, default=200000, help='the interval for saving predicted samples (#iteration)')\n parser.add_argument('--save_log', type=str2bool, default=True, help='save logging or not')\n parser.add_argument('--print_log', type=str2bool, default=True, help='print logging or not')\n parser.add_argument('--pavi_log', type=str2bool, default=False, help='logging on pavi or not')\n\n # data loading\n parser.add_argument('--actions', default='all', help='the categories of actions that we use')\n parser.add_argument('--train_dir', default='../data/', help='direction of training set')\n parser.add_argument('--test_dir', default='../data/', help='direction of test set')\n parser.add_argument('--sample_dir', default='../samples', help='save generated samples')\n parser.add_argument('--batch_size', type=int, default=64, help='batch size')\n parser.add_argument('--source_seq_len', type=int, default=50, help='length of input sequence')\n parser.add_argument('--target_seq_len', type=int, default=25, help='length of predicted sequence')\n\n # model\n parser.add_argument('--model', default=None, help='the model will be used')\n parser.add_argument('--model_args', action=DictAction, default=dict(), help='the arguments of model')\n parser.add_argument('--edge_weighting', type=bool, default=True, help='Add edge importance weighting')\n parser.add_argument('--weights', default=None, help='the weights for network initialization')\n parser.add_argument('--ignore_weights', type=str, default=[], nargs='+', help='the name of weights which will be ignored in the initialization')\n\n return parser","sub_path":"cmu-long/processor/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":8001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"330230880","text":"from __future__ import print_function, division\nfrom math import sqrt\n\ndef avg(list1):\n #get average value for a list\n sum_list1=0.\n for i in list1:\n sum_list1=sum_list1+i\n list1_len=len(list1)\n avg=sum_list1/list1_len\n return(avg)\n\ndef deviation(list1, avg):\n #define standard deviation equation 
for a given list of values and the average\n    N=len(list1)\n    num_sum=0.\n    for i in list1:\n        num_sum=num_sum+(i-avg)**2\n    std_dev=sqrt(num_sum/N)\n    return(std_dev)\n\n#This code creates a list with the necessary 127 values we are using to determine the standard deviation\ntest_list=list(range(1,128))\nfor i in list(range(len(test_list))):\n    test_list[i]=i+100001\nprint(test_list)\n\navg_test_list=avg(test_list)\nprint(deviation(test_list, avg_test_list))\n\n\n\n\n","sub_path":"Lab03/lab03/LAB03_StdDev.py","file_name":"LAB03_StdDev.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"560164747","text":"import os.path as osp\nimport tensorflow as tf\nimport vgg16\n\n\nvgg16_path = ''  # NOTE: set to the downloaded vgg16.npy\n\n\nclass PerceptualLoss:\n    def __init__(self, x, y, image_shape, layers, w_layers, w_act=0.1):\n        \"\"\"\n        Builds vgg16 network and computes the perceptual loss.\n        \"\"\"\n        assert len(image_shape) == 3 and image_shape[-1] == 3\n        assert osp.exists(vgg16_path), 'Cannot find %s' % vgg16_path\n\n        self.w_act = w_act\n        self.vgg_layers = layers\n        self.w_layers = w_layers\n        batch_shape = [None] + image_shape  # [None, H, W, 3]\n\n        vgg_net = vgg16.Vgg16(vgg16_path)\n        self.x_acts = vgg_net.get_vgg_activations(x, layers)\n        self.y_acts = vgg_net.get_vgg_activations(y, layers)\n        loss = 0\n        for w, act1, act2 in zip(self.w_layers, self.x_acts, self.y_acts):\n            loss += w * tf.reduce_mean(tf.square(self.w_act * (act1 - act2)))\n        self.loss = loss\n\n    def __call__(self):\n        return self.loss\n\n","sub_path":"vgg_loss.py","file_name":"vgg_loss.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"147799004","text":"import numpy as np\r\n# lists are used for data, arrays, stacking and queues\r\n\r\ntext1 = ' hari ini hari minggu'\r\ntext2 = ' hari ini hari jum\\'at '\r\ntext3 = ' abda: \"kemarin kemana bro ?\"'\r\ntext4 = \" abda : \\\"jalan jalan ke sini bro\\\"\"\r\ntext5 = ' abda : \"kita mau jalan jalan ke mana bro\" \\nfatur : \"kemana aja bro\"'\r\ntext6 = \"\"\"\r\nlu mau ke mana bro\r\nsini bro \r\nmau ikut lo?\r\n\r\n\"\"\"\r\ntext7 = r\"C:\\multimedia\"\r\nprint (text7)\r\nprint (10*'ekekek')\r\n\r\n# how to take parts of a string\r\n\r\ndata = \"dataoutputan\"\r\n\r\na = data [3]\r\nb = data [0:5]\r\nc = data [-5]\r\nprint (a)\r\nprint (b)\r\nprint (c)\r\n\r\nkata = \"anggora kucing\"\r\nd = kata [8:]\r\nprint (d)\r\nprint ('ee '+d+' kembang pasir')\r\n\r\ndataangka = [1,3,4,8,12,14,17,20,30,40]\r\n# accessing a list\r\ndata1 = dataangka[3]\r\ndata2 = dataangka[0:4]\r\ndata3 = dataangka [-2]\r\n# slicing a list\r\ndata4 = dataangka [:4]\r\ndata5 = dataangka [2:4]\r\ndata6 = dataangka [4:]\r\nprint (data1)\r\nprint (data2)\r\nprint (data3)\r\nprint (data4)\r\nprint (data5)\r\nprint (data6)\r\n\r\ndataNo = [100,200,300,400,500,600,700,800,900]\r\n\r\n# concatenating lists\r\ndatagabungan = dataangka + dataNo\r\n\r\n# changing the contents of a list\r\nprint (datagabungan)\r\nprint (dataangka)\r\n\r\n# copying a list into a new variable\r\na = dataangka[:] # [:] == a takes a copy of all the data in dataangka\r\na[4] = 98\r\nprint (dataangka)\r\nprint (a)\r\n\r\n# changing list contents with the slicing method\r\n\r\ndataangka [4:6] = [11,13]\r\nprint (dataangka)\r\n\r\n# a list inside a list\r\nx = [dataangka, dataNo]\r\nprint (x)\r\n\r\n# accessing a list inside a multidimensional list\r\ny = x [1] [4]\r\nprint (y)\r\n\r\n# appending a member to a list\r\ndataangka.append(10)\r\nprint (dataangka)\r\n\r\n# reading the length of a list\r\npanjang_list = len(dataangka)\r\nprint (panjang_list)\r\n\r\n","sub_path":"numberandoperation/numberandoperation/numberandoperation.py","file_name":"numberandoperation.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"55807434","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.arange(start = 0, stop = 1, step = 0.01)\ny = np.sin(x * 2 * np.pi)\nplt.plot(x, y)\nplt.title('Sine Wave')\nplt.xlabel('2 * pi * x')\nplt.ylabel('y')\nplt.grid(True)\nplt.savefig('sine')\nplt.show()","sub_path":"Solutions/falconis/Task4/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
{"seq_id":"317350417","text":"# Django settings for bbk_manager project.\nimport os\n\nDEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nSITE_ROOT = os.path.dirname(os.path.realpath(__file__))\n\nDATABASE_ENGINE = 'mysql'           # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\nDATABASE_NAME = 'DB_NAME'           # Or path to database file if using sqlite3.\nDATABASE_USER = 'USER'              # Not used with sqlite3.\nDATABASE_PASSWORD = 'PASSWORD'      # Not used with sqlite3.\nDATABASE_HOST = 'HOST_NAME'         # Set to empty string for localhost. Not used with sqlite3.\nDATABASE_PORT = ''                  # Set to empty string for default. Not used with sqlite3.\nEMAIL_HOST = \"localhost\"\nEMAIL_PORT = \"\"\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'America/New_York'\n\n# Language code for this installation. 
All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n)\n\nROOT_URLCONF = 'urls'\n\nTEMPLATE_DIRS = (\n os.path.join(SITE_ROOT, 'templates')\n)\n\nINSTALLED_APPS = (\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'bbk',\n)\n","sub_path":"bbk_manager/settings.example.py","file_name":"settings.example.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"499770316","text":"import utility\nfrom PyQt4 import QtGui\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\n\n\nclass MyMplCanvas(FigureCanvas):\n def __init__(self, parent=None, width=5, height=4, dpi=300, p='achievement'):\n fig = Figure(figsize=(width, height), dpi=dpi)\n self.axes = fig.add_subplot(111)\n self.axes.patch.set_facecolor('white')\n self.axes.patch.set_alpha(0.8)\n self.path = p\n self.compute_initial_figure()\n\n\n #self.axes.yaxis.set_visible(False)\n self.axes.xaxis.set_visible(False)\n fig.tight_layout(pad=0)\n\n FigureCanvas.__init__(self, fig)\n self.setParent(parent)\n\n FigureCanvas.setSizePolicy(self,\n QtGui.QSizePolicy.Expanding,\n QtGui.QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n\n\nclass MyDynamicMplCanvas(MyMplCanvas):\n def __init__(self, *args, **kwargs):\n MyMplCanvas.__init__(self, *args, **kwargs)\n\n def compute_initial_figure(self):\n array = utility.open_achievement(self.path)\n\n array = increase_len(array, 100)\n for i in range(10):\n array = smooth(array, 20)\n\n self.axes.plot(list(range(len(array))), array, '')\n\n def update_figure(self):\n array = utility.open_achievement(self.path)\n\n array = increase_len(array, 100)\n for i in range(10):\n array = smooth(array, 20)\n\n self.axes.plot(list(range(len(array))), array, 'b')\n self.draw()\n\n\ndef increase_len(a, n):\n array = list()\n\n for i in range(len(a)):\n for j in range(n):\n array.append(a[i])\n\n return array\n\n\ndef smooth(a, n):\n array = list()\n for i in range(n//2, len(a)-n//2):\n array.append(sum(a[i-n//2:i+n//2])/n)\n\n return array","sub_path":"oralka/Graph_achievements.py","file_name":"Graph_achievements.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"106355202","text":"from bs4 import BeautifulSoup\nimport requests\n\nurl=\"https://www.instagram.com/{}/\"\ndef parse_data(s):\n data={}\n s=s.split(\"-\")[0]\n s=s.split(\" \")\n data['Followers']=s[0]\n data['Following']=s[2]\n data['Posts']=s[4]\n return data\n\ndef scrape_data(username):\n r=requests.get(url.format(username))\n s=BeautifulSoup(r.text,\"html.parser\")\n meta=s.find(\"meta\", property=\"og:description\")\n return parse_data(meta.attrs['content'])\n\nif __name__ == \"__main__\":\n try:\n 
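        # scrape_data() below requests the public profile page and reads its
        # og:description meta tag, which (for logged-out visitors) is rendered
        # as "<followers> Followers, <following> Following, <posts> Posts - ...";
        # parse_data() then splits that string on "-" and on spaces.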
username=input(\"\\nEnter your Instagram username: \")\n data=scrape_data(username)\n print(\"This account has : \",data[\"Following\"],\" Following\")\n print(\"This account has : \",data[\"Followers\"],\" Followers\")\n print(\"This account has : \",data[\"Posts\"],\" Posts\")\n except:\n print(\"Username not found, Please check the username you've entered\")","sub_path":"instagram_stats.py","file_name":"instagram_stats.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"393367898","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\nimport unittest\nfrom collections import OrderedDict\nfrom typing import Dict\n\nimport numpy as np\nfrom test_util import GenArgList\n\nimport oneflow.compatible.single_client.unittest\nfrom oneflow.compatible import single_client as flow\nfrom oneflow.compatible.single_client import typing as tp\n\n\ndef _compare_triplet_margin_loss_with_np(\n anchor_shape,\n pos_shape,\n neg_shape,\n eps,\n margin,\n p,\n swap,\n device_type,\n machine_ids,\n device_counts,\n):\n anchor = np.random.random(size=anchor_shape).astype(np.float32)\n pos = np.random.random(size=pos_shape).astype(np.float32)\n neg = np.random.random(size=neg_shape).astype(np.float32)\n eps = eps\n assert device_type in [\"cpu\", \"gpu\"]\n flow.clear_default_session()\n if device_type == \"cpu\":\n flow.config.cpu_device_num(device_counts)\n else:\n flow.config.gpu_device_num(device_counts)\n func_config = flow.FunctionConfig()\n func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids))\n func_config.default_logical_view(flow.scope.consistent_view())\n\n def np_triplet_margin_loss(np_anchor, np_pos, np_neg, eps, np_margin, np_p, swap):\n np_d_1_norm = np.power(np.abs(np_anchor - np_pos + eps), np_p)\n np_d_2_norm = np.power(np.abs(np_anchor - np_neg + eps), np_p)\n np_d_1 = np.power(np.sum(np_d_1_norm, axis=-1), 1.0 / np_p)\n np_d_2 = np.power(np.sum(np_d_2_norm, axis=-1), 1.0 / np_p)\n if swap:\n np_dist_swap = np.power(np.abs(np_pos - np_neg + eps), np_p)\n np_dist_swap = np.power(np.sum(np_dist_swap, axis=-1), 1.0 / np_p)\n np_d_2 = np.minimum(np_d_2, np_dist_swap)\n np_triplet_margin_loss = np.maximum(np_margin + np_d_1 - np_d_2, 0)\n np_triplet_margin_loss_mean = np.mean(np_triplet_margin_loss)\n np_triplet_margin_loss_sum = np.sum(np_triplet_margin_loss)\n return {\n \"np_triplet_margin_loss\": np_triplet_margin_loss,\n \"np_triplet_margin_loss_mean\": np_triplet_margin_loss_mean,\n \"np_triplet_margin_loss_sum\": np_triplet_margin_loss_sum,\n }\n\n np_out_tripletloss_dict = np_triplet_margin_loss(\n anchor, pos, neg, eps, margin, p, swap\n )\n\n def np_triplet_loss_diff(anchor, pos, neg, margin, p):\n def _compute_distance(x1, x2, x3):\n d_1_norm = np.power(np.abs(x1 - x2 + 1e-06), p)\n d_2_norm = np.power(np.abs(x1 - x3 + 1e-06), p)\n d_1 = np.power(np.sum(d_1_norm, axis=-1), 1.0 / p)\n d_2 = 
np.power(np.sum(d_2_norm, axis=-1), 1.0 / p)\n return d_1 - d_2 + margin\n\n def _compute_per_diff(x1, x2, p, eps=1e-06):\n _abs_index = np.where(x1 - x2 > 0, 1, -1)\n _abs_index_support = np.where(x1 - x2 == 0, 1, 0)\n _abs_grad = _abs_index + _abs_index_support\n _abs_val = np.abs(x1 - x2 + eps)\n _power_abs_val = np.power(_abs_val, p)\n _sum_val = np.sum(_power_abs_val, axis=1, keepdims=True)\n _sqrt_sum_val = np.power(_sum_val + eps, 1.0 / p - 1)\n _power_val = np.power(_abs_val, p - 1)\n _grad = np.multiply(_sqrt_sum_val, _power_val)\n _grad *= _abs_grad\n return _grad / x1.shape[0]\n\n d = _compute_distance(anchor, pos, neg)\n zero_index = np.where(d < -1e-06)\n anchor_grad_1 = _compute_per_diff(anchor, pos, p)\n anchor_grad_2 = _compute_per_diff(anchor, neg, p)\n total_grad = anchor_grad_1 - anchor_grad_2\n for i in zero_index:\n total_grad[i] = 0\n grad_dict = {\"np_triplet_loss_grad_mean\": total_grad}\n return grad_dict\n\n np_grad_dict = np_triplet_loss_diff(anchor, pos, neg, margin, p)\n\n def assert_prediction_grad(blob: tp.Numpy):\n assert np.allclose(blob, np_grad_dict[\"np_triplet_loss_grad_mean\"], atol=0.002)\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def oneflow_marginloss(\n of_anchor: tp.Numpy.Placeholder(shape=anchor.shape),\n of_pos: tp.Numpy.Placeholder(shape=pos.shape),\n of_neg: tp.Numpy.Placeholder(shape=neg.shape),\n ) -> Dict[str, tp.Numpy]:\n with flow.scope.placement(device_type, \"0:0\"):\n v = flow.get_variable(\n shape=anchor.shape,\n dtype=flow.float32,\n initializer=flow.constant_initializer(0),\n name=\"x_var\",\n )\n x_anchor = of_anchor + v\n flow.watch_diff(x_anchor, assert_prediction_grad)\n triplet_marginloss = flow.nn.TripletMarginLoss(\n x_anchor,\n of_pos,\n of_neg,\n margin=margin,\n p=p,\n swap=swap,\n reduction=\"none\",\n name=\"of_tripletmarginloss\",\n )\n triplet_marginloss_mean = flow.nn.TripletMarginLoss(\n x_anchor,\n of_pos,\n of_neg,\n margin=margin,\n p=p,\n swap=swap,\n reduction=\"mean\",\n name=\"of_tripletmarginloss_mean\",\n )\n triplet_marginloss_sum = flow.nn.TripletMarginLoss(\n x_anchor,\n of_pos,\n of_neg,\n margin=margin,\n p=p,\n swap=swap,\n reduction=\"sum\",\n name=\"of_tripletmarginloss_sum\",\n )\n with flow.scope.placement(device_type, \"0:0\"):\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0\n ).minimize(triplet_marginloss_mean)\n return {\n \"of_triplet_margin_loss\": triplet_marginloss,\n \"of_triplet_margin_loss_mean\": triplet_marginloss_mean,\n \"of_triplet_margin_loss_sum\": triplet_marginloss_sum,\n }\n\n of_out_tripletloss_dict = oneflow_marginloss(anchor, pos, neg)\n assert np.allclose(\n of_out_tripletloss_dict[\"of_triplet_margin_loss\"],\n np_out_tripletloss_dict[\"np_triplet_margin_loss\"],\n atol=0.001,\n )\n assert np.allclose(\n of_out_tripletloss_dict[\"of_triplet_margin_loss_mean\"],\n np_out_tripletloss_dict[\"np_triplet_margin_loss_mean\"],\n atol=0.001,\n )\n assert np.allclose(\n of_out_tripletloss_dict[\"of_triplet_margin_loss_sum\"],\n np_out_tripletloss_dict[\"np_triplet_margin_loss_sum\"],\n atol=0.001,\n )\n\n\ndef _gen_arg_dict(shape, eps, margin, p, swap, device_type, machine_ids, device_counts):\n arg_dict = OrderedDict()\n arg_dict[\"anchor_shape\"] = [shape]\n arg_dict[\"pos_shape\"] = [shape]\n arg_dict[\"neg_shape\"] = [shape]\n arg_dict[\"eps\"] = [eps]\n arg_dict[\"margin\"] = [margin]\n arg_dict[\"p\"] = [p]\n arg_dict[\"swap\"] = [swap]\n arg_dict[\"device_type\"] = [device_type]\n 
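    # Every value is wrapped in a one-element list because GenArgList (from
    # test_util) expands the OrderedDict into the cartesian product of the
    # value lists, yielding one positional-argument tuple per combination,
    # e.g. {"p": [1.5, 2.0], "swap": [False]} -> (1.5, False), (2.0, False).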
arg_dict[\"machine_ids\"] = [machine_ids]\n arg_dict[\"device_counts\"] = [device_counts]\n return arg_dict\n\n\n@flow.unittest.skip_unless_1n1d()\nclass Test_triplet_loss_1n1d(flow.unittest.TestCase):\n def test_triplet_margin_loss_cpu(test_case):\n arg_dict = _gen_arg_dict(\n shape=(3, 3),\n eps=1e-06,\n margin=1,\n p=1.5,\n swap=False,\n device_type=\"cpu\",\n machine_ids=\"0:0\",\n device_counts=1,\n )\n for arg in GenArgList(arg_dict):\n _compare_triplet_margin_loss_with_np(*arg)\n\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_margin_ranking_loss_gpu(test_case):\n arg_dict = _gen_arg_dict(\n shape=(3, 6),\n eps=1e-06,\n margin=1,\n p=2.0,\n swap=False,\n device_type=\"gpu\",\n machine_ids=\"0:0\",\n device_counts=1,\n )\n for arg in GenArgList(arg_dict):\n _compare_triplet_margin_loss_with_np(*arg)\n\n\n@flow.unittest.skip_unless_1n2d()\nclass Testmarginloss1n2d(flow.unittest.TestCase):\n @unittest.skipIf(os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"), \"only test cpu cases\")\n def test_margin_ranking_loss_1n2d(test_case):\n arg_dict = _gen_arg_dict(\n shape=(6, 6),\n eps=1e-06,\n margin=1,\n p=2.0,\n swap=False,\n device_type=\"gpu\",\n machine_ids=\"0:0-1\",\n device_counts=2,\n )\n for arg in GenArgList(arg_dict):\n _compare_triplet_margin_loss_with_np(*arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"python/oneflow/compatible/single_client/test/ops/test_TripletMarginLoss.py","file_name":"test_TripletMarginLoss.py","file_ext":"py","file_size_in_byte":9042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"351367714","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 15 14:34:20 2021\n\n@author: yf\n\"\"\"\n\nimport numpy as np\nimport math,os,sys\nimport random\nimport shutil\nfrom mpi4py import MPI\nfrom tqdm import tqdm\n\nfrom MCMTpy.gfs.pyfk_GFs.read_json import read_GFs_json\nfrom MCMTpy.sampler.pyfk_MH.read_sampler_json import read_Inv_json\nfrom MCMTpy.sampler.pyfk_MH.sampler_module import config_fk,get_GFs_sta_info,get_Sta_rand_data,write_inv_para,get_sigma,get_fixed_fm\nfrom MCMTpy.sampler.pyfk_MH.sampler_module import get_MISFIT_1,get_MISFIT_2,inv_output_file\n\n\n\n\n\n\n\n\n#%%########################################################################\n# -------------------\n# 1. main-function\n# -------------------\n###########################################################################w\n\ndef sample_MH(filename):\n \n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n\n\n #########################################################################\n # -----------------\n # 1.1. preprocess\n # -----------------\n #########################################################################\n\n if rank == 0:\n try:\n # read the inversion superparameter dict, station parameter dict, original data dict, source time function (obspy. 
Trace)\n Inv_para,Sta_inv_info,Sta_raw_data,Stf = read_Inv_json(filename)\n print(\"Setting: read inversion.json successful in path\",filename,flush=True)\n except Exception as inst:\n print(inst,flush=True)\n raise ValueError('read_Inv_json error')\n\n try:\n # read gfs database info\n Prepro_para,_,Source_Station_info_MPI = read_GFs_json(Inv_para[\"GFs_json_file\"])\n print(\"Setting: read built_GFs.json successful in path\",Inv_para[\"GFs_json_file\"],flush=True)\n except Exception as inst:\n print(inst,flush=True)\n raise ValueError('read_GFs_json error')\n\n\n MPI_n = Inv_para['MPI_n']\n Chains_n = Inv_para['Chains_n']\n\n\n #------------ a. make directory ------------#\n if os.path.exists(Inv_para[\"Output_path\"]):\n shutil.rmtree(Inv_para[\"Output_path\"])\n if os.path.exists(Inv_para[\"Output_path\"]) == False:\n os.makedirs(Inv_para[\"Output_path\"])\n print(\"Setting: mkdir Output_path successful in path \",Inv_para[\"Output_path\"],flush=True)\n\n\n #------------ b. output parameter info ------------#\n file_path = os.path.join(Inv_para[\"Output_path\"],'Inv_para_info.txt') \n write_inv_para(file_path,Inv_para,Sta_inv_info,Sta_raw_data,Stf)\n print(\"Setting: write Inv_para_info.txt successful in path \",Inv_para[\"Output_path\"],flush=True)\n print(\"\\n*****************************************************************\",flush=True)\n print(f\"Now begin sampling {Chains_n} Markov Chains with {MPI_n} cores ...\\n\",flush=True)\n\n\n else:\n Inv_para,Sta_inv_info,Sta_raw_data,Stf,Prepro_para,Source_Station_info_MPI = [None for _ in range(6)]\n\n\n\n #########################################################################\n # ------------------------------\n # 1.2. broadcast the variables\n # ------------------------------\n #########################################################################\n Inv_para = comm.bcast(Inv_para,root=0)\n Sta_inv_info = comm.bcast(Sta_inv_info,root=0)\n Sta_raw_data = comm.bcast(Sta_raw_data,root=0)\n Stf = comm.bcast(Stf,root=0)\n Prepro_para = comm.bcast(Prepro_para,root=0)\n Source_Station_info_MPI = comm.bcast(Source_Station_info_MPI,root=0)\n\n\n\n #########################################################################\n # -----------------------------------------\n # 1.3. MPI MCMC_inv loop through each chunk\n # -----------------------------------------\n #########################################################################\n try:\n MCMC_inv(Inv_para,\n Sta_inv_info,\n Sta_raw_data,\n Stf,\n Prepro_para,\n Source_Station_info_MPI)\n except Exception as inst:\n print(inst,flush=True)\n raise ValueError('MCMC_inv error')\n\n\n\n #########################################################################\n # -----------------------\n # 1.4. MPI comm.barrier\n # -----------------------\n #########################################################################\n comm.barrier()\n if rank == 0:\n print(\"\\n\\n*****************************************************************\",flush=True)\n print(\"Successful !\\nThe inversion process is completed.\\n\\n\",flush=True)\n sys.exit()\n\n\n\n\n\n\n\n#%%########################################################################\n# -------------------\n# 2. 
sub-function\n# -------------------\n###########################################################################\n\ndef MCMC_inv(Inv_para,Sta_inv_info,Sta_raw_data,Stf,Prepro_para,Source_Station_info_MPI):\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n\n\n #########################################################################\n # -----------------\n # 2.1. preprocess\n # -----------------\n #########################################################################\n Dt = Inv_para[\"Dt\"] # sampling rate of the raw data\n dt = Prepro_para[\"dt\"] # sampling rate of the gfs database\n\n if Dt!=dt:\n raise ValueError('Error!\\nGFs sample rate is not consistent with Raw_data!')\n\n Chains_n = Inv_para[\"Chains_n\"]\n N=Inv_para[\"N\"]\n com_noise_wave=1/(Inv_para[\"Noise_level_waveform\"]**2)\n com_noise_time=1/(Inv_para[\"Noise_level_phasetime\"]**2)\n InvType=Inv_para[\"InvType\"]\n FM0=Inv_para[\"FM0\"]\n N_k=Inv_para[\"N_k\"]\n N_mag=Inv_para[\"N_mag\"]\n FM_boundary = Inv_para[\"FM_boundary\"]\n\n source_prem,_,config_prem=config_fk(InvType)\n source_time_function = Stf\n\n\n\n\n\n #########################################################################\n # -----------------\n # 2.2. inv\n # -----------------\n #########################################################################\n \n for chain in range(0,Chains_n,1):\n #%% 2.2.1 process\n FM_1_all=[];FM_1_accept_all=[]\n FM_2_all=[];FM_2_accept_all=[]\n MISFIT_1_all=[];MISFIT_1_accept_all=[];\n MISFIT_2_all=[];MISFIT_2_accept_all=[];\n accpet_ratio_all=[]\n \n FM=FM0\n MISFIT_1 = 0\n MISFIT_2 = 0\n accept_num=0 # The number of accepted solutions\n generate_num=0 # The number of solutions generated\n\n\n if rank==0:\n bar = tqdm(range(0,round(N),1),position=rank,file=sys.stdout)\n print(\"\\tprint rank 0 as example:\",flush=True)\n else:\n bar = range(0,round(N),1)\n\n #%% 2.2.2 inv process\n for jj in bar:\n #------------ a. genrate new FM ------------#\n sigma = get_sigma(Inv_para,jj) # The standard deviation of randomly generating new FM\n Fixed_FM_used = get_fixed_fm(Inv_para,jj)\n FM_new=[] # [m0, str, dip rake, lat, lon, depth, t0] in dc\n \n for i in range(0,len(FM),1):\n if Fixed_FM_used[i]=='constant':\n fm_new = FM[i]\n else:\n fm_new = np.random.normal(loc=FM[i],scale=sigma[i],size=1)[0] \n \n if Fixed_FM_used[0]=='variable' and i==0 and (jj>=N_k) and jj<(N_k+N_mag):\n if ('Mw_mean' in locals().keys()):\n fm_new = Mw_mean+FM[0]\n else:\n fm_new = FM[0]\n FM_new.append(fm_new)\n \n FM_new_used = FM_new.copy()\n if InvType=='dc':\n if FM_new[1]>360 and FM_new[1]<720:\n FM_new_used[1]=FM_new_used[1]-360\n if FM_new[1]<0 and FM_new[1]>(-360):\n FM_new_used[1]=FM_new_used[1]+360\n\n if FM_new[3]>180 and FM_new[3]<540:\n FM_new_used[3]=FM_new_used[3]-360\n if FM_new[3]<(-180) and FM_new[3]>(-540):\n FM_new_used[3]=FM_new_used[3]+360\n\n #------------ b. 
the flag is used to determine whether the new solution is beyond the boundary ------------#\n Is_Beyond = False\n for i in range(0,len(FM),1):\n if (FM_new_used[i]FM_boundary[i][1]):\n Is_Beyond = True\n break\n\n\n #------------ c.forward model ------------#\n if Is_Beyond==True:\n print(jj+1,'th: Beyond\\n')\n continue\n # jj-=1\n else:\n # the number of accepted solutions\n generate_num = generate_num+1\n #------------ % c.1.forward model || gets info of all stations for a given single source ------------#\n GFs_sta_info = get_GFs_sta_info(FM_new_used[-4:-1],Inv_para[\"NET_STA\"],Prepro_para,Source_Station_info_MPI)\n\n\n #------------ % c.2.forward model || give FM and stf ------------#\n if InvType=='mt':\n # a):\n # The seismic moment given by the JSON file is MW magnitude.\n # pyfk uses dyne-cm units for the synthesis of the moment tensor MT solution, which is converted to M0\n source_mechanism = np.array(FM_new_used[0:7])\n mw = source_mechanism[0]\n source_mechanism[0] = np.power(10., 1.5 * mw + 16.1)\n elif InvType=='dc':\n # b):\n # when == dc, pyfk uses moment magnitude, which does not need to be changed\n source_mechanism = np.array(FM_new_used[0:4])\n elif InvType=='sf':\n # c):\n # The seismic moment given by the JSON file is MW magnitude.\n # pyfk uses dyne units when synthesing the moment tensor sf solution, which is converted to M0\n source_mechanism = np.array(FM_new_used[0:3])\n mw = source_mechanism[0]\n source_mechanism[0] = np.power(10., 1.5 * mw + 16.1)\n \n source_prem.update_source_mechanism(source_mechanism)\n\n\n #------------ % c.3.forward model syn,and stores all the stations data in a dictionary, and reads the TP Ts ------------#\n Sta_rand_data = get_Sta_rand_data(GFs_sta_info,config_prem,source_time_function)\n\n\n #------------ % c.4.MISFIT 1 ------------#\n if jj= 709:\n hr=1\n elif jj>=N_k and jj360 and FM_2_min[1]<720:\n FM_2_min_used[1]=FM_2_min_used[1]-360\n if FM_2_min[1]<0 and FM_2_min[1]>(-360):\n FM_2_min_used[1]=FM_2_min_used[1]+360\n\n if FM_2_min[3]>180 and FM_2_min[3]<540:\n FM_2_min_used[3]=FM_2_min_used[3]-360\n if FM_2_min[3]<(-180) and FM_2_min[3]>(-540):\n FM_2_min_used[3]=FM_2_min_used[3]+360\n\n # get accpet_ratio\n accpet_ratio=round(accept_num/generate_num,5)\n accpet_ratio_all.append(accpet_ratio)\n \n \n # get recommand Noise_level_phasetime and Noise_level_waveform\n if rank == 0:\n # stage 1: get diff_MISFIT_1 diff_MISFIT_1_mean\n # get difference between 2 sample of wg_misfit\n if len(MISFIT_1_all) > 1:\n diff_MISFIT_1 = MISFIT_1 - MISFIT_1_all[-2]\n else:\n diff_MISFIT_1 = 0\n # get mean difference between 2 sample of wg_misfit\n if len(MISFIT_1_all) < 100:\n diff_MISFIT_1_mean = np.mean(abs(np.diff(MISFIT_1_all)))/com_noise_time\n else:\n diff_MISFIT_1_mean = np.mean(abs(np.diff(np.array(MISFIT_1_all)[-100:])))/com_noise_time\n\n # stage 2: get diff_MISFIT_2 diff_MISFIT_2_mean\n # get difference between 2 sample of wg_misfit\n if len(MISFIT_2_all) > 1:\n diff_MISFIT_2 = MISFIT_2 - MISFIT_2_all[-2]\n else:\n diff_MISFIT_2 = 0\n # get mean difference between 2 sample of wg_misfit\n if len(MISFIT_2_all) < 100:\n diff_MISFIT_2_mean = np.mean(abs(np.diff(MISFIT_2_all)))/com_noise_wave\n else:\n diff_MISFIT_2_mean = np.mean(abs(np.diff(np.array(MISFIT_2_all)[-100:])))/com_noise_wave\n \n \n remin_Noise_level_phasetime = round(math.sqrt(abs(diff_MISFIT_1_mean/math.log(0.2))/2),5)\n remax_Noise_level_phasetime = round(math.sqrt(abs(diff_MISFIT_1_mean/math.log(0.5))/2),5)\n remin_Noise_level_waveform = 
round(math.sqrt(abs(diff_MISFIT_2_mean/math.log(0.2))/2),5)\n remax_Noise_level_waveform = round(math.sqrt(abs(diff_MISFIT_2_mean/math.log(0.5))/2),5)\n \n \n bar.write(f\"{com_noise_time},{com_noise_wave}\")\n \n bar.write(f\"Iter {jj+1} of {chain}th Markov-Chain: \")\n # MISFIT1 MISFIT2\n bar.write(f'MISFIT1:{round(MISFIT_1,5)} || min MISFIT1:{round(min(MISFIT_1_all),5)}')\n if jj max_border)].index\r\n print(f\"{len(outliers)} outliers detected in column {column}\")\r\n \r\n RFM_df.drop(outliers, inplace = True)\r\n \r\n# Rescaling the data\r\nrfm_df = RFM_df[['Monetary','Frequency','Recency']]\r\nscale_standardisation = StandardScaler()\r\n\r\nrfm_df_scaled = scale_standardisation.fit_transform(rfm_df)\r\n\r\nrfm_df_scaled = pd.DataFrame(rfm_df_scaled)\r\nrfm_df_scaled.columns = ['monetary','frequency','recency']\r\n\r\n# Modelling \r\n\r\nk_values = list(range(1,10))\r\nwcss_list = []\r\n\r\nfor k in k_values:\r\n kmeans = KMeans(n_clusters = k)\r\n kmeans.fit_transform(rfm_df_scaled)\r\n wcss_list.append(kmeans.inertia_)\r\n\r\nplt.plot(k_values,wcss_list)\r\nplt.xlabel(\"k\")\r\nplt.ylabel(\"WCSS Score\")\r\nplt.title(\"Within Cluster Sum of Squares - by k\")\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n# final model with k = 3\r\nkmeans = KMeans(n_clusters = 3)\r\nkmeans.fit(rfm_df_scaled)\r\n\r\n# Assigning the labels with the data\r\nRFM_df['Cluster'] = kmeans.labels_\r\n\r\n\r\n# Plot the clusters\r\nsns.boxplot(x = 'Cluster no.', y = 'Monetary', data = RFM_df)\r\n\r\nsns.barplot(x='Cluster no.', y = 'Monetary', data = RFM_df)\r\nsns.barplot(x='Cluster no.', y = 'Recency', data = RFM_df)\r\nsns.barplot(x='Cluster no.', y = 'Frequency', data = RFM_df)\r\n\r\nplt.figure(figsize = (15,5))\r\nsns.scatterplot(x = RFM_df['Monetary'], \r\n y = RFM_df['Frequency'],\r\n hue = RFM_df['Cluster no.'],\r\n palette= sns.color_palette('hls',3))\r\nplt.show()\r\n\r\n# Lets look at the plot of clusters\r\nclusters = RFM_df.groupby('Cluster no.')\r\ncentroids = kmeans.cluster_centers_\r\n\r\nfor cluster,data in clusters:\r\n plt.scatter(data[\"Monetary\"],data[\"Frequency\"],data['Recency'],marker = \"o\",label = cluster)\r\n plt.scatter(centroids[cluster,0],centroids[cluster,1],marker = \"X\", color = \"Black\",s=300)\r\nplt.legend()\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n\r\n# As we see its not perfectly grouped lets do feature selection and see the result\r\n\r\npca = PCA(n_components = None)\r\npca.fit(RFM_df)\r\n\r\n# Extract the expected variances across components\r\n\r\nexplained_variance = pca.explained_variance_ratio_\r\nexplained_variance_cumulative = pca.explained_variance_ratio_.cumsum()\r\n\r\n\r\npca = PCA(n_components = 3)\r\ncomponents = pca.fit_transform(RFM_df)\r\n\r\nfeatures = range(pca.n_components_)\r\nplt.bar(features, pca.explained_variance_ratio_, color = 'yellow')\r\nplt.xlabel('PCA componets')\r\nplt.ylabel('Variance %')\r\nplt.xticks(features)\r\nplt.tight_layout()\r\nplt.show()\r\n\r\npca_components = pd.DataFrame(components)\r\n# Lets build the model based on 2 components\r\n\r\nk_values = list(range(1,10))\r\nwcss_list = []\r\n\r\nfor k in k_values:\r\n kmeans = KMeans(n_clusters = k)\r\n kmeans.fit_transform(pca_components.iloc[:,:2])\r\n wcss_list.append(kmeans.inertia_)\r\n\r\nplt.plot(k_values,wcss_list, '-o',color = 'red')\r\nplt.xlabel(\"k\")\r\nplt.ylabel(\"WCSS Score\")\r\nplt.title(\"Within Cluster Sum of Squares - by k\")\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n# it looks like again the optimal number of clusters is 3\r\nmodel = KMeans(n_clusters = 
3)\r\n\r\nclusters = model.fit_predict(pca_components.iloc[:,:2])\r\nRFM_df['Cluster no.'] = clusters\r\n\r\nclusters = RFM_df.groupby('Cluster no.')\r\ncentroids = kmeans.cluster_centers_\r\n\r\nfor cluster,data in clusters:\r\n plt.scatter(data[\"Monetary\"],data[\"Frequency\"],data['Recency'],marker = \"o\",label = cluster)\r\n plt.scatter(centroids[cluster,0],centroids[cluster,1],marker = \"X\", color = \"Black\",s=300)\r\nplt.legend()\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n\r\nplt.figure(figsize = (15,5))\r\nsns.scatterplot(x = RFM_df['Monetary'], \r\n y = RFM_df['Frequency'],\r\n hue = RFM_df['Cluster no.'],\r\n palette= sns.color_palette('hls',3))\r\nplt.show()\r\n\r\n\r\n\r\nfig = plt.figure(figsize=(21,10))\r\nax = fig.add_subplot(111, projection='3d')\r\nax.scatter(RFM_df[\"Monetary\"][RFM_df.Cluster == 0], RFM_df[\"Frequency\"][RFM_df.Cluster == 0], RFM_df[\"Recency\"][RFM_df.Cluster == 0], c='blue', s=60)\r\nax.scatter(RFM_df[\"Monetary\"][RFM_df.Cluster == 1],RFM_df[\"Frequency\"][RFM_df.Cluster == 1], RFM_df[\"Recency\"][RFM_df.Cluster == 1], c='red', s=60)\r\nax.scatter(RFM_df[\"Monetary\"][RFM_df.Cluster == 2], RFM_df[\"Frequency\"][RFM_df.Cluster == 2], RFM_df[\"Recency\"][RFM_df.Cluster == 2], c='yellow', s=60)\r\n\r\nax.view_init(30, 185)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Kmeans_clustering_project_v2.py","file_name":"Kmeans_clustering_project_v2.py","file_ext":"py","file_size_in_byte":7258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"507408609","text":"# fname = \"hello.txt\"\n# file_object = open(fname,\"w\")\n# file_object.write(\"Hello World\")\n# file_object.close()\n\n# with open(fname,\"w\") as file_object:\n# file_object.write(\"Hello World Again!!\")\n\n# with open(fname,'r') as f:\n# print(f.read())\nimport os\nthis_file_path = os.path.abspath(__file__)\n# print(this_file_path)#/home/doomguy/Desktop/Coding/Python/Files/filehandling.py\nBASE_DIR = os.path.dirname(this_file_path)\n# print(BASE_DIR)#/home/doomguy/Desktop/Coding/Python/Files\nENTIRE_PROJECT_DIR = os.path.dirname(BASE_DIR)\n# print(ENTIRE_PROJECT_DIR)#/home/doomguy/Desktop/Coding/Python\nemail_txt = os.path.join(BASE_DIR,\"templates\",\"email.txt\")\ncontent = \"\"\nwith open(email_txt,'r') as f:\n content = f.read()\n\nprint(content.format(name = 'Rohit'))","sub_path":"Day 2 - Files and Downloading Files/filehandling.py","file_name":"filehandling.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"91571566","text":"\"\"\"\n SALTS XBMC Addon\n Copyright (C) 2014 tknorris\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. 
If not, see .\n\"\"\"\nimport scraper\nimport xbmc\nimport urllib\nimport urlparse\nimport re\nimport xbmcaddon\nimport json\nimport base64\nfrom salts_lib import pyaes\nfrom salts_lib import log_utils\nfrom salts_lib.constants import VIDEO_TYPES\nfrom salts_lib.constants import QUALITIES\n\nBASE_URL = 'http://playboxhd.com/'\nSEARCH_URL = '/api/box?type=search&keyword=%s&os=Android&v=2.0.2&k=0'\nDETAIL_URL = '/api/box?type=detail&id=%s&os=Android&v=2.0.2&k=0'\nSTREAM_URL = '/api/box?type=stream&id=%s&os=Android&v=2.0.2&k=0'\nPB_KEY = base64.decodestring('cXdlcnR5dWlvcGFzZGZnaGprbHp4YzEyMzQ1Njc4OTA=')\nIV = '\\0' * 16\n\nRESULT_URL = '/video_type=%s&id=%s'\nQUALITY_MAP = {'720p': QUALITIES.HD720, '1080p': QUALITIES.HD1080, '360p': QUALITIES.MEDIUM, 'Auto': QUALITIES.HIGH}\n\n\nclass Playbox_Scraper(scraper.Scraper):\n base_url = BASE_URL\n\n def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):\n self.timeout = timeout\n self.base_url = xbmcaddon.Addon().getSetting('%s-base_url' % (self.get_name()))\n\n @classmethod\n def provides(cls):\n return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE, VIDEO_TYPES.MOVIE])\n\n @classmethod\n def get_name(cls):\n return 'PlayBox'\n\n def resolve_link(self, link):\n return link\n\n def format_source_label(self, item):\n if 'resolution' in item:\n return '[%s] (%s) %s' % (item['quality'], item['resolution'], item['host'])\n else:\n return '[%s] %s' % (item['quality'], item['host'])\n\n def get_sources(self, video):\n source_url = self.get_url(video)\n sources = []\n if source_url:\n params = urlparse.parse_qs(source_url)\n # movie ids are to the catalog, episode ids are to the stream\n if video.video_type == VIDEO_TYPES.MOVIE:\n stream_id = self.__get_movie_stream_id(params['id'][0])\n else:\n stream_id = params['id'][0]\n\n if stream_id:\n stream_url = STREAM_URL % (stream_id)\n url = urlparse.urljoin(self.base_url, stream_url)\n html = self._http_get(url, cache_limit=.5)\n try:\n js_data = json.loads(html)\n except ValueError:\n log_utils.log('Invalid JSON returned for: %s' % (url), xbmc.LOGWARNING)\n else:\n for stream in js_data['data']:\n stream_url = self.__decrypt(base64.decodestring(stream['stream']))\n if stream['server'] == 'ggvideo':\n direct = True\n quality = self._gv_get_quality(stream_url)\n host = self._get_direct_hostname(stream_url)\n if 'http' not in stream_url: continue\n elif stream['server'] == 'amvideo':\n for match in re.finditer('> creating sandbox directory...\".format(self.id))\n create_sandbox_status = self.create_sandbox_dir()\n if create_sandbox_status != \"\":\n self.logger.error(\"[Submission {}] Could not create sandbox directory. Aborting...\".format(self.id))\n self.update_frontend(create_sandbox_status, self.set_results(TestStatus.INTERNAL_ERROR))\n return\n\n # Download the test files (if not downloaded already)\n self.logger.info(\"[Submission {}] >> downloading test files...\".format(self.id))\n download_tests_status = self.download_tests()\n if download_tests_status != \"\":\n self.logger.error(\"[Submission {}] Could not download tests properly. Aborting...\".format(self.id))\n self.update_frontend(download_tests_status, self.set_results(TestStatus.INTERNAL_ERROR))\n return\n\n # Save the source to a file so we can compile it later\n self.logger.info(\"[Submission {}] >> writing source code to file...\".format(self.id))\n write_source_status = self.write_source()\n if write_source_status != \"\":\n self.logger.error(\"[Submission {}] Could not write source file. 
Aborting...\".format(self.id))\n self.update_frontend(write_source_status, self.set_results(TestStatus.INTERNAL_ERROR))\n return\n\n # Send an update that the compilation has been started for this submission\n self.update_frontend(\"\", self.set_results(TestStatus.COMPILING))\n\n # Compile\n self.logger.info(\"[Submission {}] >> compiling...\".format(self.id))\n compile_status = self.compile()\n if compile_status != \"\":\n self.logger.info(\"[Submission {}] Could not compile solution. Stopping execution...\".format(self.id))\n self.update_frontend(compile_status, self.set_results(TestStatus.COMPILATION_ERROR))\n return\n\n # Execute each of the tests\n self.logger.info(\"[Submission {}] >> starting processing tests...\".format(self.id))\n run_status = self.process_tests()\n if run_status != \"\":\n self.logger.info(\"[Submission {}] Error while running the solution. Aborting...!\".format(self.id))\n self.update_frontend(run_status, self.set_results(TestStatus.INTERNAL_ERROR))\n return\n\n # Finished with this submission\n self.logger.info(\"[Submission {}] >> done with {}!\".format(self.id, self.id))\n self.update_frontend(\"DONE\")\n\n def update_frontend(self, message=\"\", results=None):\n # Merge current message and results with previous ones\n self.update_message = message\n if results is not None:\n for result in results:\n found = False\n for i in range(len(self.update_results)):\n if self.update_results[i]['position'] == result['position']:\n self.update_results[i] = result\n found = True\n break\n if not found:\n self.update_results.append(result)\n\n # Update every UPDATE_INTERVAL seconds so we don't spam the frontend too much\n # We're using time() instead of perf_counter() so we get a UNIX timestamp (with parts of seconds)\n # This info helps figure out WHEN exactly (date + hour) the solution was graded.\n if time() - self.update_timer > config.UPDATE_INTERVAL or self.update_message != \"\":\n self.update_timer = time()\n data = {\n \"id\": self.id,\n \"message\": self.update_message,\n \"results\": json.dumps(self.update_results),\n \"timestamp\": self.update_timer\n }\n # Make the updates asynchronous so we don't stop the execution of the tests\n Thread(target=send_request, args=[\"POST\", self.update_url, data]).start()\n\n def set_results(self, status):\n results = []\n for test in self.tests:\n results.append({\n \"position\": test[\"position\"],\n \"status\": status.name,\n \"score\": 0\n })\n return results\n\n def create_sandbox_dir(self):\n status = \"\"\n try:\n # Delete if already present (maybe regrade?)\n if path.exists(self.path_sandbox):\n shutil.rmtree(self.path_sandbox)\n # Create the submit testing directory\n makedirs(self.path_sandbox)\n except OSError as ex:\n status = str(ex)\n self.logger.error(\"[Submission {}] {}\".format(self.id, str(ex)))\n return status\n\n def download_test(self, test_name, test_hash):\n test_path = config.PATH_TESTS + test_hash\n\n # Check if file already exists\n if path.exists(test_path):\n return\n\n # If not, we should download it\n url = self.tests_url + test_name\n self.logger.info(\"[Submission {}] Downloading file {} with hash {} from URL: {}\".format(\n self.id, test_name, test_hash, url))\n response = send_request(\"GET\", url)\n if response.status_code != 200:\n self.logger.error(\"[Submission {}] Could not download test {} with hash {} using URL: {}\".format(\n self.id, test_name, test_hash, url))\n raise Exception(\"Could not download test file!\")\n\n with open(test_path, \"wb\") as file:\n # Write 1MB chunks from 
the file at a time\n for chunk in response.iter_content(config.FILE_DOWNLOAD_CHUNK_SIZE):\n file.write(chunk)\n\n def download_tests(self):\n # In case the directory for the tests does not exist, create it\n if not path.exists(config.PATH_DATA):\n makedirs(config.PATH_DATA)\n if not path.exists(config.PATH_TESTS):\n makedirs(config.PATH_TESTS)\n\n status = \"\"\n try:\n for test in self.tests:\n self.download_test(test[\"inpFile\"], test[\"inpHash\"])\n self.download_test(test[\"solFile\"], test[\"solHash\"])\n except Exception as ex:\n status = str(ex)\n self.logger.error(\"[Submission {}] {}\".format(self.id, str(ex)))\n return status\n\n def write_source(self):\n status = \"\"\n try:\n with open(self.path_source, \"w\") as file:\n file.write(self.source)\n except OSError as ex:\n status = \"Internal error: \" + str(ex)\n self.logger.error(\"[Submission {}] {}\".format(self.id, str(ex)))\n return status\n\n def compile(self):\n try:\n status = executor.submit(Compiler.compile, self.language, self.path_source, self.path_executable).result()\n except ValueError as ex:\n # If a non-compiler error occurred, log the message in addition to sending it to the user\n status = \"Internal error: \" + str(ex)\n self.logger.error(\"[Submission {}] {}\".format(self.id, str(ex)))\n return status\n\n def process_tests(self):\n start_time = perf_counter()\n runner = Runner(self)\n errors = \"\"\n\n test_futures = []\n for test in self.tests:\n test_futures.append([test, executor.submit(runner.run, test)])\n\n for test_future in test_futures:\n test, future = test_future\n try:\n # Wait for the test to be executed\n future.result()\n except ValueError as ex:\n errors += \"Internal error on test \" + test[\"inpFile\"] + \"(\" + test[\"inpHash\"] + \"): \" + str(ex)\n self.logger.error(\"[Submission {}] {}\".format(self.id, str(ex)))\n break\n except Exception as ex:\n self.logger.error(\"[Submission {}] Got exception: {}\".format(self.id, str(ex)))\n\n self.logger.info(\"[Submission {}] -- executed {} tests in {:.3f}s.\".format(\n self.id, len(self.tests), perf_counter() - start_time))\n return errors\n\n def cleanup(self):\n self.logger.info(\"[Submission {}] Cleaning up sandbox...\".format(self.id))\n if path.exists(self.path_sandbox):\n shutil.rmtree(self.path_sandbox)\n","sub_path":"grader/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":10896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"41444229","text":"from django.db.models.deletion import SET_NULL\nfrom django.db.models.fields import BLANK_CHOICE_DASH, NullBooleanField\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom store.forms import SignUpForm,CouponApplyForm,CheckOutForm,PaymentForm\nfrom store.models import Category,Brand,Product,Banner,Cart,CartItem,Order,OrderItem,Coupon\nfrom django.contrib.auth.models import Group,User\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib.auth import login,logout , authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator,EmptyPage,InvalidPage\nfrom django.utils import timezone\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nfrom django.urls import reverse\n\n# Create your views here.\n\ndef index(request):\n products = None\n \n banners = None\n banners = Banner.objects.all()\n\n \n products = Product.objects.all().filter(available=True)[:10]\n products_lastest = 
Product.objects.all().order_by('-id')[:10]\n category = Category.objects.all()\n brands = Brand.objects.all()\n \n return render(request, 'index.html',{'products':products,'banners':banners,'catagory':category,\n 'products_lastest': products_lastest, 'brands':brands })\n \n\n\ndef product_by_category(request,category_slug=None):\n products = None\n category_page = None\n\n category_page = get_object_or_404(Category, slug=category_slug )\n\n catagory_ancestor = category_page.get_ancestors(ascending=False, include_self=True)\n\n if category_page.is_leaf_node() == True : # parent\n products = Product.objects.all().filter(category = category_page , available=True)\n\n \n else:\n products = Product.objects.none()\n category_set = category_page.get_descendants(include_self=False)\n for cat in category_set :\n \n if cat.is_leaf_node() == True:\n \n products_ = Product.objects.all().filter(category=cat, available=True)\n products = products.union(products_)\n print(products)\n \n else: \n \n continue\n\n\n paginator = Paginator(products.order_by('id'),3)\n try:\n page=int(request.GET.get('page','1'))\n except:\n page=1\n\n try: \n productperPage = paginator.page(page)\n except (EmptyPage,InvalidPage):\n productperPage = paginator.page(paginator.num_pages)\n \n\n\n return render(request, 'products.html',{'products':productperPage,'category':category_page, 'catagory_ancestor':catagory_ancestor })\n \n\ndef product_by_brand(request,brand_slug=None):\n products = None\n brand_page = None\n\n \n brand_page = get_object_or_404(Brand, slug= brand_slug )\n products = Product.objects.all().filter(brand = brand_page , available=True)\n\n paginator = Paginator(products.order_by('id'),3)\n try:\n page=int(request.GET.get('page','1'))\n except:\n page=1\n\n try: \n productperPage = paginator.page(page)\n except (EmptyPage,InvalidPage):\n productperPage = paginator.page(paginator.num_pages)\n\n return render(request, 'products.html',{'products':productperPage,'brand':brand_page})\n\n\n\n\ndef productPage(request,product_id):\n try:\n \n product = Product.objects.get( id=product_id )\n except Exception as e :\n raise e\n\n return render(request, 'productdetail.html',{'product':product})\n\n\n\ndef _cart_id(request):\n cart = request.session.session_key\n if not cart :\n cart = request.session.create()\n return cart\n\n\n\n@login_required(login_url='signIn')\ndef addCart(request,product_id):\n \n if request.method == 'POST':\n \n quantity = request.POST['number']\n\n #ดึงสินค้าที่จะซื้อ\n product = Product.objects.get(id=product_id)\n\n if int(quantity) <= product.stock : #สินค้ามีstock พอให้ซื้อ\n \n #สร้างตะกร้า\n try:\n cart=Cart.objects.get(cart_id=_cart_id(request))\n except Cart.DoesNotExist :\n cart=Cart.objects.create(cart_id=_cart_id(request))\n cart.save()\n\n try:\n #ซื้อรายการสินค้าซ้ำ\n cart_item=CartItem.objects.get(product=product, cart=cart)\n if int(quantity) <= cart_item.product.stock - cart_item.quantity:\n\n #เปลี่ยนจำนวนรายการสินค้า\n cart_item.quantity+= int(quantity)\n cart_item.save()\n else:\n print('ssssssssssssssssssssssssssssssssssssss')\n messages.error(request,'จำนวนสินค้ามีไม่พอ')\n return redirect(request.META.get('HTTP_REFERER', 'redirect_if_referer_not_found'))\n except CartItem.DoesNotExist :\n #ซื้อรายการสินค้าครั้งแรก\n #บันทึกลงDB\n cart_item=CartItem.objects.create(\n product = product,\n cart = cart,\n quantity = quantity\n )\n cart_item.save()\n print('quantity = ',quantity)\n \n else:\n messages.error(request,'จำนวนสินค้ามีไม่พอ')\n return 
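The Paginator / EmptyPage / InvalidPage block recurs almost verbatim in product_by_category, product_by_brand, orderHistory and search below; it could be collected into one helper along these lines (a suggested refactor, not code from the original app):

from django.core.paginator import Paginator, EmptyPage, InvalidPage

def paginate(request, queryset, per_page):
    paginator = Paginator(queryset, per_page)
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1
    try:
        return paginator.page(page)
    except (EmptyPage, InvalidPage):
        # Out-of-range page numbers fall back to the last page.
        return paginator.page(paginator.num_pages)

Each view then reduces to: productperPage = paginate(request, products.order_by('id'), 3)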
redirect(request.META.get('HTTP_REFERER', 'redirect_if_referer_not_found'))\n \n\n\n return redirect(request.META.get('HTTP_REFERER', 'redirect_if_referer_not_found'))\n\n\n\ndef cartdetail(request):\n total = 0\n counter =0\n cart_items = None\n discount_price = None\n new_total= None\n coupon = None\n coupon_id = 0\n add_coupon = False\n\n\n try:\n cart =Cart.objects.get(cart_id=_cart_id(request)) #ดึงตะกร้า\n cart_items =CartItem.objects.filter(cart=cart, active=True) #ดึงข้อมูลสินค้าในตะกร้า\n for item in cart_items:\n total+=(item.product.price*item.quantity)\n counter+=(item.quantity)\n except Exception as e:\n pass\n \n\n\n if request.method == 'POST':\n\n if 'redeem' in request.POST:\n\n now = timezone.now()\n form = CouponApplyForm(request.POST)\n if form.is_valid():\n code = form.cleaned_data.get('code')\n \n try:\n coupon = Coupon.objects.get(code__iexact = code,\n valid_from__lte = now,\n valid_to__gte = now,\n active = True)\n\n if total >= coupon.minimum:\n add_coupon = True\n new_total = total - coupon.discount\n coupon_id = coupon.id\n \n \n else:\n add_coupon = False\n new_total= total\n messages.error(request,'ราคาของสินค้าไม่ถึงราคาขั้นต่ำที่กำหนดไว้')\n form = CouponApplyForm()\n #return redirect(request.META.get('HTTP_REFERER', 'redirect_if_referer_not_found'))\n\n except Coupon.DoesNotExist:\n messages.error(request,'ไม่พบคูปอง หรือ หมดอายุ')\n new_total= total\n add_coupon = False\n \n\n else:\n form = CouponApplyForm()\n new_total = total\n \n\n return render(request, 'cartdetail.html', dict(cart_items=cart_items, total=total, \n counter=counter, new_total=new_total, add_coupon=add_coupon,\n coupon=coupon, form=form, coupon_id=coupon_id))\n\n\n\n\n\ndef removeCoupon(request):\n add_coupon = False\n return redirect('cartDetail')\n\n\ndef removeCart(request, product_id):\n cart= Cart.objects.get(cart_id = _cart_id(request))\n product= get_object_or_404(Product, id =product_id)\n cartItem = CartItem.objects.get(product=product, cart=cart)\n cartItem.delete()\n\n return redirect('cartDetail')\n\n\n\ndef brand(request):\n return render(request, 'brand.html') \n\n\n\ndef signUpView(request):\n if request.method == 'POST':\n form=SignUpForm(request.POST)\n if form.is_valid():\n #บันทึกข้อมูล user\n form.save()\n #บันทึก Group Customer\n #ดึงusername มาใช้\n username = form.cleaned_data.get('username')\n #ดึงข้อมูล user จากฐานข้อมูล\n signUpUser = User.objects.get(username = username)\n #จัด Group\n customer_group = Group.objects.get(name=\"Customer\")\n customer_group.user_set.add(signUpUser)\n return redirect('signIn')\n else : \n form = SignUpForm()\n return render(request, \"signup.html\", {'form':form})\n\n\n\ndef signInView(request):\n if request.method == 'POST':\n form = AuthenticationForm(data=request.POST)\n if form.is_valid():\n username =request.POST['username']\n password =request.POST['password']\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('home')\n else:\n return redirect('signUp')\n\n else:\n\n form = AuthenticationForm()\n return render(request,'signIn.html', {'form':form})\n\n\n\ndef signOutView(request):\n logout(request)\n return redirect('signIn')\n\n \n\ndef checkOutView(request,coupon_id):\n total = 0\n counter = 0\n cart_items = None\n discount_price = None\n new_total= None\n add_coupon = False\n coupon = None\n \n\n try:\n cart =Cart.objects.get(cart_id=_cart_id(request)) #ดึงตะกร้า\n cart_items =CartItem.objects.filter(cart=cart, active=True) #ดึงข้อมูลสินค้าในตะกร้า\n 
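One caveat in the _cart_id helper above: Django's SessionBase.create() returns None (it only assigns a fresh session key), so for a brand-new session the helper hands back None as the cart id. Re-reading session_key after creating the session avoids that:

def _cart_id(request):
    # session_key stays None until the session has been created once.
    if not request.session.session_key:
        request.session.create()  # assigns a new key; its return value is None
    return request.session.session_key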
for item in cart_items:\n total+=(item.product.price*item.quantity)\n counter+=(item.quantity)\n except Exception as e:\n pass\n\n form = CheckOutForm(request.POST)\n\n \n\n if request.method == 'POST': \n \n if 'confirm' in request.POST:\n print('Uppppppppppppppppppppppppppppppppp')\n if coupon_id == 0:\n new_total = total\n\n else:\n coupon = Coupon.objects.get(id=coupon_id)\n new_total = total - coupon.discount\n add_coupon = True\n\n\n else:\n print('Downnnnnnnnnnnnnnnnnnnnnnnnnnnnnn')\n if coupon_id == 0:\n new_total = total\n\n else:\n coupon = Coupon.objects.get(id=coupon_id)\n new_total = total - coupon.discount\n add_coupon = True\n form = CheckOutForm(request.POST)\n if form.is_valid():\n now = timezone.now()\n data = Order()\n\n data.first_name = form.cleaned_data['first_name']\n data.last_name = form.cleaned_data['last_name']\n data.phone = form.cleaned_data['phone']\n data.user_id = request.user.username\n data.address = form.cleaned_data.get('address')\n data.city = form.cleaned_data.get('city')\n data.district = form.cleaned_data.get('district')\n data.subdistrict = form.cleaned_data.get('subdistrict')\n data.postcode = form.cleaned_data.get('postcode')\n data.total = new_total\n data.status = 'รอชำระเงิน'\n if coupon_id != 0:\n data.code = coupon.code\n data.save()\n \n \n\n for item in cart_items:\n order_item = OrderItem.objects.create(\n product = item.product.name,\n quantity = item.quantity,\n price = item.product.price,\n order = data\n )\n order_item.save()\n #ลดจำนวนstock\n product = Product.objects.get(id = item.product.id)\n product.stock = int(item.product.stock - order_item.quantity)\n product.save()\n item.delete()\n order = Order.objects.get(id=data.id)\n return redirect(order.get_url())\n\n else: \n form = CheckOutForm()\n new_total = total\n \n return render(request, \"checkout.html\", dict(cart_items=cart_items, total=total, counter=counter,form=form \n , new_total= new_total ,coupon= coupon , add_coupon= add_coupon,\n coupon_id = coupon_id ))\n\n\n\ndef paymentView(request,order_id):\n \n if request.method == 'POST' :\n form = PaymentForm(request.POST,request.FILES)\n if form.is_valid():\n order = Order.objects.get(id=order_id)\n order.slip = form.cleaned_data['slip']\n order.status = 'ชำระเงินแล้ว-รอตรวจสอบ'\n order.save()\n return redirect('thankyou')\n else : \n form = PaymentForm()\n order = Order.objects.get(id=order_id)\n total = order.total\n\n\n return render(request,'payment.html',dict(form=form, order_id=order_id, total=total))\n\n\n\ndef orderHistory(request):\n if request.user.is_authenticated:\n username = str(request.user.username)\n orders = Order.objects.filter(user_id=username)\n\n\n paginator = Paginator(orders.order_by('-id'),10)\n try:\n page=int(request.GET.get('page','1'))\n except:\n page=1\n\n try: \n orderperPage = paginator.page(page)\n except (EmptyPage,InvalidPage):\n orderperPage = paginator.page(paginator.num_pages)\n return render(request, 'order.html', {'orders':orderperPage})\n\n\n\n\ndef viewOrder(request,order_id):\n coupon = None\n if request.user.is_authenticated:\n username = str(request.user.username)\n order = Order.objects.get(user_id=username,id=order_id)\n orderitem = OrderItem.objects.filter(order=order)\n try:\n coupon = Coupon.objects.get(code=order.code)\n if coupon == None:\n coupon = None\n \n except Coupon.DoesNotExist:\n pass\n\n return render(request, 'viewOrder.html', {'order':order, 'order_item':orderitem, 'coupon':coupon})\n\n\n\ndef search(request):\n title = request.GET.get('title')\n products = 
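Decrementing stock with a read-modify-write, as checkOutView does below, can race when two checkouts overlap; Django's F() expressions push the subtraction into the database as one atomic UPDATE (a sketch against the same Product model):

from django.db.models import F

def decrement_stock(product_id, quantity):
    # Emits: UPDATE ... SET stock = stock - %s WHERE id = %s
    Product.objects.filter(id=product_id).update(stock=F('stock') - quantity)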
Product.objects.none()\n products_name = Product.objects.filter(name__icontains = request.GET.get('title', None) , available=True)\n products_category = Product.objects.filter(category__name__icontains = request.GET.get('title') , available=True)\n products_brand = Product.objects.filter(brand__name__icontains = request.GET.get('title') , available=True)\n products = products.union(products_name)\n products = products.union(products_category)\n products = products.union(products_brand)\n\n\n\n paginator = Paginator(products.order_by('id'),2)\n try:\n page=int(request.GET.get('page','1'))\n except:\n page=1\n\n try: \n productperPage = paginator.page(page)\n except (EmptyPage,InvalidPage):\n productperPage = paginator.page(paginator.num_pages)\n\n return render(request, 'products.html',{'products':productperPage, 'title':title})\n\n \n \ndef thankyou(request):\n return render(request, 'thankyou.html')\n","sub_path":"shop/store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"647406871","text":"from django.urls import include, path\nfrom rest_framework import routers\n\nfrom .viewsets import (DivisionLevelViewSet, DivisionViewSet, GeometryViewSet,\n PointLabelOffsetViewSet, PointViewSet)\n\nrouter = routers.DefaultRouter()\n\nrouter.register(r\"divisions\", DivisionViewSet)\nrouter.register(r\"geometries\", GeometryViewSet)\nrouter.register(r\"division-levels\", DivisionLevelViewSet)\nrouter.register(r\"point-label-offsets\", PointLabelOffsetViewSet)\nrouter.register(r\"points\", PointViewSet)\n\nurlpatterns = [path(\"api/\", include(router.urls))]\n","sub_path":"geography/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"310463329","text":"class Rectangle():\n\n # init = constructor\n def __init__(self, w, h, *args):\n width = w\n height = h\n print(\"rectangle: width is \" + str(width) + \" and height is \" + str(h))\n\n\nclass Square():\n\n def set_size(self, w, *args):\n width = w\n height = w\n print(\"square: width is \" + str(width))\n\n\nrect = Rectangle(3, 4)\n\nsquare = Square()\nsquare.set_size(8)","sub_path":"classes/what_is_init.py","file_name":"what_is_init.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"613571215","text":"import numpy as np\nfrom attention import attention\nimport tensorflow as tf\n\ntf.compat.v1.disable_v2_behavior()\n\nclass Model(object):\n def __init__(self,\n nh1, # nh1表示第1层rnn神经元的个数450\n nh2, # nh2表示第2层rnn神经元的个数450\n ny, # ny: 第1层rnn输出的类别数\n nz, # nz: 第2层rnn输出的类别数\n de, # emb_dimension: 300\n cs,\n lr, # 学习率\n lr_decay,\n embedding, # 词向量\n max_gradient_norm,\n batch_size,\n model_cell='lstm',\n nonstatic=False):\n self.batch_size = batch_size\n self.input_x = tf.compat.v1.placeholder(tf.int32, shape=[None, None, cs],name='input_x') # input_x.shape=(None,None,3)\n self.input_y = tf.compat.v1.placeholder(tf.int32, shape=[None, None],name=\"input_y\") # input_y.shape = (None,None)\n self.input_z = tf.compat.v1.placeholder(tf.int32, shape=[None, None],name='input_z') # input_z.shape = (None,None)\n self.keep_prob = tf.compat.v1.placeholder(tf.float32)\n\n self.lr = tf.Variable(lr, dtype=tf.float32)\n\n self.learning_rate_decay_op = self.lr.assign(self.lr * lr_decay)\n\n # Creating embedding input\n with 
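The search view above unions three separate icontains querysets; the same result can come from a single query with Q objects (sketch against the models above):

from django.db.models import Q

def search_products(title):
    return Product.objects.filter(
        Q(name__icontains=title) |
        Q(category__name__icontains=title) |
        Q(brand__name__icontains=title),
        available=True,
    ).distinct()  # a product matching several clauses appears once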
tf.device(\"/cpu:0\"), tf.name_scope('embedding'):\n if nonstatic:\n W = tf.constant(embedding, name='embW', dtype=tf.float32)\n else:\n W = tf.Variable(embedding, name='embW', dtype=tf.float32)\n inputs = tf.nn.embedding_lookup(W, self.input_x)\n inputs = tf.reshape(inputs, [self.batch_size, -1, cs * de]) # (16,?,900)\n\n inputs = tf.nn.dropout(inputs, rate=1 - self.keep_prob, name='drop_inputs')\n\n with tf.device(\"/gpu:0\"):\n # Create the internal multi-layer cell for rnn\n if model_cell == 'rnn':\n single_cell1 = tf.nn.rnn_cell.BasicRNNCell(nh1) # nh1表示神经元的个数,450\n single_cell2 = tf.nn.rnn_cell.BasicRNNCell(nh2) # nh2表示神经元的个数,450\n elif model_cell == 'lstm':\n single_cell1 = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(nh1, state_is_tuple=True)\n single_cell2 = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(nh2, state_is_tuple=True)\n elif model_cell == 'gru':\n single_cell1 = tf.nn.rnn_cell.GRUCell(nh1)\n single_cell2 = tf.nn.rnn_cell.GRUCell(nh2)\n else:\n raise Exception('model_cell error!')\n # DropoutWrapper rnn_cell\n self.single_cell1 = tf.compat.v1.nn.rnn_cell.DropoutWrapper(single_cell1, output_keep_prob=self.keep_prob)\n self.single_cell2 = single_cell2\n # self.single_cell2 = tf.compat.v1.nn.rnn_cell.DropoutWrapper(single_cell2, output_keep_prob=self.keep_prob)\n self.init_state = self.single_cell1.zero_state(self.batch_size, dtype=tf.float32)\n\n # RNN1\n with tf.compat.v1.variable_scope('rnn1'):\n # rnn_conv_1\n self.rnn_outputs1, self.rnn_state1 = tf.compat.v1.nn.dynamic_rnn(\n # self.rnn_conv_outputs1, self.rnn_conv_state1 = tf.compat.v1.nn.dynamic_rnn( # self.rnn_conv_outputs1 (16, ?, 450), self.rnn_conv_state1 (16, 450)\n cell=self.single_cell1,\n inputs=inputs,\n initial_state=self.init_state,\n dtype=tf.float32\n )\n # Attention layer1\n # attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(num_units=self.rnn_size, memory=encoder_outputs,\n # memory_sequence_length=encoder_inputs_length)\n attention_mechanism = tf.contrib.seq2seq.LuongAttention(num_units=nh1, memory=self.rnn_outputs1)\n\n att_cell = tf.contrib.seq2seq.AttentionWrapper(cell=self.single_cell2, attention_mechanism=attention_mechanism,\n attention_layer_size=nh2, name='Attention_Wrapper')\n # with tf.name_scope('Attention_layer'):\n # attention_output1, alphas1 = attention(self.rnn_conv_outputs1, ATTENTION_SIZE, return_alphas=True)\n # tf.summary.histogram('alphas', alphas1)\n #\n # Dropout Attention1\n self.att1_out = tf.compat.v1.nn.rnn_cell.DropoutWrapper(att_cell, output_keep_prob=self.keep_prob)\n self.att_initial_state = self.att1_out.zero_state(batch_size=self.batch_size, dtype=tf.float32)\n\n # RNN2\n with tf.compat.v1.variable_scope('rnn2'):\n # rnn_conv_2\n self.rnn_out2, self.rnn_state2 = tf.compat.v1.nn.dynamic_rnn(\n cell=self.att1_out,\n inputs=self.rnn_outputs1,\n initial_state=self.att_initial_state,\n dtype=tf.float32\n )\n # rnn_conv_2 old\n # self.rnn_conv_outputs2, self.rnn_conv_state2 = tf.compat.v1.nn.dynamic_rnn(\n # cell=self.single_cell2,\n # inputs=self.rnn_conv_outputs1,\n # initial_state=self.init_state,\n # dtype=tf.float32\n # )\n\n # Attention layer2\n # with tf.name_scope('Attention_layer'):\n # attention_output2, alphas2 = attention(self.rnn_conv_outputs2, ATTENTION_SIZE, return_alphas=True)\n # tf.summary.histogram('alphas', alphas2)\n #\n # Dropout\n # self.att2_out = tf.nn.dropout(attention_output2, rate=1-0.8, name='drop_att_2')\n # self.rnn_out2 = tf.nn.dropout(self.rnn_out2, rate=1 - 0.8, name='drop_att_2')\n\n # outputs_y\n with 
tf.compat.v1.variable_scope('output_sy'):\n w_y = tf.compat.v1.get_variable(\"softmax_w_y\", [nh1, ny]) # w_y (450, 2)\n b_y = tf.compat.v1.get_variable(\"softmax_b_y\", [ny]) # b_y (2, )\n rnn_outputs1 = tf.reshape(self.rnn_outputs1, [-1, nh1]) # rnn_ori_outputs1 (?, 450)\n sy = tf.compat.v1.nn.xw_plus_b(rnn_outputs1, w_y, b_y) # sy (?, 2)\n self.sy_pred = tf.reshape(tf.argmax(sy, 1), [self.batch_size, -1]) # sy_pred (16, ?)\n # outputs_z\n with tf.compat.v1.variable_scope('output_sz'):\n w_z = tf.get_variable(\"softmax_w_z\", [nh2, nz]) # w_z (450, 5)\n b_z = tf.get_variable(\"softmax_b_z\", [nz]) # b_z (5, )\n rnn_outputs2 = tf.reshape(self.rnn_out2, [-1, nh2]) # rnn_ori_outputs2 (?, 450) ######################################\n sz = tf.compat.v1.nn.xw_plus_b(rnn_outputs2, w_z, b_z) # sz (?, 5)\n self.sz_pred = tf.reshape(tf.argmax(sz, 1), [self.batch_size, -1]) # sz_pred (16, ?)\n # loss\n with tf.compat.v1.variable_scope('loss'):\n label_y = tf.reshape(self.input_y, [-1]) # label_y (?, )\n loss1 = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_y, logits=sy) # loss1 (?, )\n label_z = tf.reshape(self.input_z, [-1]) # label_z (?, )\n loss2 = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label_z, logits=sz) # loss2 (?, )\n self.loss = tf.reduce_sum(0.5 * loss1 + 0.5 * loss2) / tf.cast(self.batch_size, tf.float32)\n\n tvars = tf.compat.v1.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), max_gradient_norm)\n #optimizer = tf.compat.v1.train.GradientDescentOptimizer(self.lr)\n optimizer = tf.compat.v1.train.AdamOptimizer(self.lr)\n self.train_op = optimizer.minimize(self.loss)\n\n def cost(output, target):\n # Compute cross entropy for each frame.\n cross_entropy = target * tf.log(output)\n cross_entropy = -tf.reduce_sum(cross_entropy, reduction_indices=2)\n mask = tf.sign(tf.reduce_max(tf.abs(target), reduction_indices=2))\n cross_entropy *= mask\n # Average over actual sequence lengths.\n cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)\n cross_entropy /= tf.reduce_sum(mask, reduction_indices=1)\n return tf.reduce_mean(cross_entropy)\n\n\n","sub_path":"models/mymodel_LSTM_attention.py","file_name":"mymodel_LSTM_attention.py","file_ext":"py","file_size_in_byte":8441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"629000558","text":"from django.urls import path\n\nfrom blog.views import (\n PostListView,\n PostDetailView,\n PostFormView,\n PostDecipherFormView,\n delete_post,\n get_random_tags,\n get_deciphers_by_post\n)\n\nurlpatterns = [\n path('', PostListView.as_view(), name='post-list'),\n path('', PostDetailView.as_view(), name='post-detail'),\n path('/delete', delete_post, name='post-delete'),\n path('posts//deciphers', get_deciphers_by_post, name='post-decipher-list'),\n path('posts//deciphers//form', PostDecipherFormView.as_view(), name='post-decipher-form'),\n path('form', PostFormView.as_view(), name='post-form'),\n path('random_tags', get_random_tags, name='post-random-tags'),\n]\n","sub_path":"personal-site/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"14247575","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('loving', '0001_initial'),\n ]\n\n operations = [\n 
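Note that the training step above computes clipped gradients with clip_by_global_norm but then calls optimizer.minimize(self.loss), which recomputes gradients internally and silently discards the clipping. For the clip to take effect, the clipped gradients must be handed back via apply_gradients (a drop-in replacement using the same names as above):

tvars = tf.compat.v1.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), max_gradient_norm)
optimizer = tf.compat.v1.train.AdamOptimizer(self.lr)
# apply_gradients consumes the already-clipped (gradient, variable) pairs.
self.train_op = optimizer.apply_gradients(zip(grads, tvars))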
migrations.CreateModel(\n name='login',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=120, null=True, blank=True)),\n ('password', models.CharField(max_length=100)),\n ],\n options=None,\n bases=None,\n ),\n migrations.AlterField(\n model_name='loving',\n name='email',\n field=models.EmailField(max_length=254, blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"src/loving/migrations/0002_auto_20150412_2309.py","file_name":"0002_auto_20150412_2309.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"371936881","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('oj_core', '0006_auto_20150902_0002'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='status',\n name='code_length',\n field=models.PositiveIntegerField(default=123),\n preserve_default=False,\n ),\n ]\n","sub_path":"oj_core/migrations/0007_status_code_length.py","file_name":"0007_status_code_length.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"246245482","text":"from Vocabulary import Vocabulary\n\n\nclass SequenceVocabulary(Vocabulary):\n def __init__(self, token_to_idx=None, unk_token=\"\",\n mask_token=\"\", begin_seq_token=\"\",\n end_seq_token=\"\"):\n\n super(SequenceVocabulary, self).__init__(token_to_idx)\n\n self._mask_token = mask_token\n self._unk_token = unk_token\n self._begin_seq_token = begin_seq_token\n self._end_seq_token = end_seq_token\n\n self.mask_index = self.add_token(self._mask_token)\n self.unk_index = self.add_token(self._unk_token)\n self.begin_seq_index = self.add_token(self._begin_seq_token)\n self.end_seq_index = self.add_token(self._end_seq_token)\n\n def to_serializable(self):\n contents = super(SequenceVocabulary, self).to_serializable()\n contents.update({'unk_token': self._unk_token,\n 'mask_token': self._mask_token,\n 'begin_seq_token': self._begin_seq_token,\n 'end_seq_token': self._end_seq_token})\n return contents\n\n def lookup_token(self, token):\n \"\"\"Retrieve the index associated with the token\n or the UNK index if token isn't present.\n\n Args:\n token (str): the token to look up\n Returns:\n index (int): the index corresponding to the token\n Notes:\n `unk_index` needs to be >=0 (having been added into the Vocabulary)\n for the UNK functionality\n \"\"\"\n if self.unk_index >= 0:\n return self._token_to_idx.get(token, self.unk_index)\n else:\n return self._token_to_idx[token]","sub_path":"SequenceVocabulary.py","file_name":"SequenceVocabulary.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"173950441","text":"# coding=utf-8\nfrom django.db import models, connection, ProgrammingError, OperationalError\n\nimport os\nimport sys\nimport django\nimport xlrd\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + '/settings.py')\nos.environ['DJANGO_SETTINGS_MODULE'] = 'zno.settings'\ndjango.setup()\n\nfilename = 'data.xls'\n\nrb = xlrd.open_workbook(os.path.dirname(os.path.abspath(__file__)) + \"/\" + filename, formatting_info=False)\nsheet = rb.sheet_by_index(0)\n\ncursor = 
connection.cursor()\n\nfor row in range(2, sheet.nrows):\n item = sheet.row_values(row)\n\n data = {}\n data['region'] = item[0]\n data['city'] = item[1]\n data['university'] = item[2].replace(\"\\\"\", \"'\")\n data['direction_name'] = item[3]\n data['direction_code'] = item[4]\n\n data['test_first'] = item[5]\n data['test_first_level'] = 1 if item[6] == u\"базовий\" else 2\n data['test_first_weight'] = item[7]\n data['test_first_min_ball'] = 0 if item[8] in (\"\", u\"склав\") else item[8]\n\n data['test_second'] = \"\" if item[9] == \"\" else item[9]\n data['test_second_level'] = 1 if item[10] == u\"базовий\" else 2\n data['test_second_weight'] = item[11]\n data['test_second_min_ball'] = 0 if item[12] in (\"\", u\"склав\") else item[12]\n\n data['test_third'] = \"\" if item[13] == \"\" else item[13]\n data['test_third_level'] = 1 if item[14] == u\"базовий\" else 2\n data['test_third_weight'] = item[15]\n data['test_third_min_ball'] = 0 if item[16] in (\"\", u\"склав\") else item[16]\n\n data['test_fourth'] = \"\" if item[17] == \"\" else item[17]\n data['test_fourth_level'] = 1 if item[18] == u\"базовий\" else 2\n data['test_fourth_weight'] = 0 if item[19] == \"\" else item[19]\n data['test_fourth_min_ball'] = 0 if item[20] in (\"\", u\"склав\") else item[20]\n\n data['diploma_weight'] = 0 if item[21] == \"\" else item[21]\n\n try:\n cursor.execute(u\"INSERT INTO `calc-2015` (`region`, `city`, `university`, `direction`, `direction_code`, `test_first`, `test_first_level`, `test_first_weight`, `test_first_min`, `test_second`, `test_second_level`, `test_second_weight`, `test_second_min`, `test_third`, `test_third_level`, `test_third_weight`, `test_third_min`, `test_fourth`, `test_fourth_level`, `test_fourth_weight`, `test_fourth_min`, `diploma_weight`) VALUES (\\\"{0}\\\", \\\"{1}\\\", \\\"{2}\\\", \\\"{3}\\\", {4}, \\\"{5}\\\", {6}, {7}, \\\"{8}\\\", \\\"{9}\\\", {10}, {11}, \\\"{12}\\\", \\\"{13}\\\", {14}, \\\"{15}\\\", \\\"{16}\\\", \\\"{17}\\\", {18}, {19}, \\\"{20}\\\", {21});\".format(\n data['region'],\n data['city'],\n data['university'],\n data['direction_name'],\n data['direction_code'],\n data['test_first'],\n data['test_first_level'],\n data['test_first_weight'],\n data['test_first_min_ball'],\n data['test_second'],\n data['test_second_level'],\n data['test_second_weight'],\n data['test_second_min_ball'],\n data['test_third'],\n data['test_third_level'],\n data['test_third_weight'],\n data['test_third_min_ball'],\n data['test_fourth'],\n data['test_fourth_level'],\n data['test_fourth_weight'],\n data['test_fourth_min_ball'],\n data['diploma_weight']\n ))\n except (OperationalError, ProgrammingError):\n print(u\"INSERT INTO `calc-2015` (`region`, `city`, `university`, `direction`, `direction_code`, `test_first`, `test_first_level`, `test_first_weight`, `test_first_min`, `test_second`, `test_second_level`, `test_second_weight`, `test_second_min`, `test_third`, `test_third_level`, `test_third_weight`, `test_third_min`, `test_fourth`, `test_fourth_level`, `test_fourth_weight`, `test_fourth_min`, `diploma_weight`) VALUES (\\\"{0}\\\", \\\"{1}\\\", \\\"{2}\\\", \\\"{3}\\\", {4}, \\\"{5}\\\", {6}, {7}, {8}, \\\"{9}\\\", {10}, {11}, {12}, \\\"{13}\\\", {14}, {15}, {16}, \\\"{17}\\\", {18}, {19}, {20}, {21});\".format(\n data['region'],\n data['city'],\n data['university'],\n data['direction_name'],\n data['direction_code'],\n data['test_first'],\n data['test_first_level'],\n data['test_first_weight'],\n data['test_first_min_ball'],\n data['test_second'],\n data['test_second_level'],\n 
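Building the INSERT by interpolating spreadsheet values into the SQL string, as above, breaks on any field containing a quote and is the classic injection shape; cursor.execute accepts %s placeholders that quote values safely (abbreviated column list, same cursor):

sql = ("INSERT INTO `calc-2015` (`region`, `city`, `university`, `direction`) "
       "VALUES (%s, %s, %s, %s)")
cursor.execute(sql, (data['region'], data['city'],
                     data['university'], data['direction_name']))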
data['test_second_weight'],\n data['test_second_min_ball'],\n data['test_third'],\n data['test_third_level'],\n data['test_third_weight'],\n data['test_third_min_ball'],\n data['test_fourth'],\n data['test_fourth_level'],\n data['test_fourth_weight'],\n data['test_fourth_min_ball'],\n data['diploma_weight']\n ))","sub_path":"calculator/parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"287772126","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing(xuming624@qq.com)\n@description: \n\"\"\"\n\nimport logging\nimport threading\nimport time\n\n\ndef get_logger():\n logger = logging.getLogger(\"threading_eg\")\n logger.setLevel(logging.DEBUG)\n # fh = logging.FileHandler(\"out.log\")\n fh = logging.StreamHandler()\n fmt = '%(asctime)s - %(name)s - %(processName)s - %(threadName)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(fmt)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n return logger\n\n\ndef doubler(number, logger):\n logger.debug(\"xxxx\")\n logger.info(\"aaaa\")\n logger.warning(\"bbbb\")\n logger.error(\"cccc\")\n\n result = number * 2\n time.sleep(5)\n logger.debug('yyyy: {}'.format(\n result))\n\n\nlogger = get_logger()\nthread_names = ['Mike', 'George', 'Wanda', 'Dingbat', 'Nina']\nfor i in range(5):\n my_thread = threading.Thread(\n target=doubler, name=thread_names[i], args=(i, logger))\n my_thread.start()","sub_path":"23thread/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"231661447","text":"import numpy as np\nimport pandas as pd\n\nimport json\nimport os\nimport sys\nimport traceback\n\nfrom collections import namedtuple\n\n\ndef read_config(cfg_file):\n hcr = HyperparameterConfigurationReader(cfg_file)\n \n return hcr.get_config()\n\n\nclass HyperparameterConfigurationReader(object):\n def __init__(self, cfg_file_name, config_path=\"\"):\n self._dict = {}\n if not cfg_file_name.endswith('.json'):\n cfg_file_name += '.json'\n path =\"{}{}\".format(config_path, cfg_file_name) \n if os.path.exists(path):\n self._dict = self.read_json(path)\n else:\n print(\"hyperparam config not found: {}\".format(path))\n\n def read_json(self, cfg_file_name):\n with open(cfg_file_name) as json_cfg:\n json_dict = json.load(json_cfg)\n return json_dict\n\n def get_config(self):\n try:\n hc = HyperparameterConfiguration(self._dict)\n if self.validate(hc):\n return hc\n except Exception as ex:\n raise ValueError(\"Invalid configuration: {}\".format(self._dict))\n \n def validate(self, cfg):\n if not hasattr(cfg, 'hyperparams'):\n print('json object does not contain hyperparams attribute: {}'.format(cfg))\n return False\n\n for hyperparam, conf in cfg.hyperparams.__dict__.items():\n\n # attribute existence test\n if not hasattr(conf, 'type'):\n print(hyperparam + \" has not type attribute.\")\n return False\n else:\n supported_types = ['int', 'float', 'str', 'bool', 'unicode']\n if not conf.type in supported_types:\n return False\n\n if not hasattr(conf, 'value_type'):\n print(hyperparam + \" has not value_type attribute.\")\n return False\n else:\n supported_value_types = ['discrete', 'continuous', 'preordered', 'categorical']\n if not conf.value_type in supported_value_types:\n return False\n\n if not hasattr(conf, 'range'):\n print(hyperparam + \" has not range attribute.\")\n return False\n else:\n 
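get_logger in the threading example above attaches a fresh StreamHandler on every call, so invoking it twice duplicates every log line; guarding on logger.handlers keeps the configuration idempotent (sketch):

import logging

def get_logger(name="threading_eg"):
    logger = logging.getLogger(name)
    if not logger.handlers:  # configure only the first time
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            "%(asctime)s - %(threadName)s - %(levelname)s - %(message)s"))
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
    return logger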
range_list = conf.range\n if len(range_list) is 0:\n print(hyperparam + \" has no range values\")\n return False\n\n for value in range_list:\n value_type_name = type(value).__name__\n if value_type_name == 'unicode':\n value_type_name = 'str'\n if value_type_name != conf.type: \n if not hasattr(conf, 'power_of'):\n print(hyperparam + \" has invalid type item.\")\n return False\n\n return True\n\n\nclass DictionaryToObject(object):\n def __init__(self, d):\n for a, b in d.items():\n if isinstance(b, (list, tuple)):\n setattr(self, a, [DictionaryToObject(x) \n if isinstance(\n x, dict) else x for x in b])\n else:\n setattr(self, a, DictionaryToObject(b) \n if isinstance(b, dict) else b)\n\n\nclass HyperparameterConfiguration(DictionaryToObject):\n def __init__(self, d):\n self._dict = d\n super(HyperparameterConfiguration, self).__init__(d)\n \n def get_hyperparams(self):\n return self._dict['param_order']\n\n def get_type(self, name):\n range = []\n hyperparams = self.hyperparams\n if name in hyperparams.__dict__.keys():\n hyperparam = getattr(hyperparams, name)\n if hyperparam.type == 'unicode':\n return \"str\"\n else:\n return hyperparam.type\n \n return range\n\n def get_range(self, name):\n range = []\n hyperparams = self.hyperparams\n if name in hyperparams.__dict__.keys():\n hyperparam = getattr(hyperparams, name)\n range = hyperparam.range\n \n if hasattr(hyperparam, 'power_of'):\n base = hyperparam.power_of\n range = []\n for power in hyperparam.range:\n range.append(base**power)\n\n if hyperparam.type == 'unicode':\n range = []\n for item in hyperparam.range:\n range.append(item.encode('ascii', 'ignore'))\n \n return range\n\n def get_dict(self):\n return self._dict\n","sub_path":"hpbandster/examples/hp_cfg.py","file_name":"hp_cfg.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"617596401","text":"from datetime import datetime\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport json\nimport requests\nimport os\nimport time\nimport config\nimport random\n\nrandom.uniform(1,10)\n\n# Set up for Adding to Index\nURL = 'https://nih.search.windows.net'.format(config.config[\"service_name\"])\nKEY = config.config[\"KEY\"]\nINDEX_NAME = 'nih-data'\nAPI = config.config[\"API\"]\nheaders = {'content-type': 'application/json', 'api-key': KEY}\n\ndef extract_text(html):\n small_soup = BeautifulSoup(html, \"html.parser\")\n return small_soup.get_text().strip().encode(\"ASCII\",\"ignore\").decode(\"ASCII\")\n\ndef get_contents_of_links(links, browser):\n URL_PATTERN = 'project_info_description.cfm'\n content_list = []\n for link in links:\n if URL_PATTERN in link:\n # link.click()\n print(\"Extracting: {}\".format(link))\n sleep_len = 10 + random.uniform(1,10)\n print(\"Sleeping for {}\".format(sleep_len))\n time.sleep(sleep_len)\n browser.get(link)\n\n link_details = dict()\n\n # Extract meta data about grant\n project_meta = browser.find_element_by_css_selector(\"div.search_criteria\")\n project_meta_cells = project_meta.find_elements_by_css_selector(\"td\")\n\n link_details[\"projectnumber\"] = extract_text(project_meta_cells[1].get_attribute(\"innerHTML\"))\n link_details[\"projectleader\"] = extract_text(project_meta_cells[3].get_attribute(\"innerHTML\"))\n link_details[\"projecttitle\"] = extract_text(project_meta_cells[5].get_attribute(\"innerHTML\"))\n link_details[\"awardeeorg\"] = extract_text(project_meta_cells[7].get_attribute(\"innerHTML\"))\n\n # Extract main 
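DictionaryToObject above recursively promotes dict keys to attributes so configs read as cfg.hyperparams.lr.range; the same idea in a compact standalone form (an illustrative re-implementation, not the module's own class):

class AttrDict:
    def __init__(self, d):
        for key, value in d.items():
            if isinstance(value, dict):
                value = AttrDict(value)
            elif isinstance(value, (list, tuple)):
                value = [AttrDict(v) if isinstance(v, dict) else v for v in value]
            setattr(self, key, value)

cfg = AttrDict({"hyperparams": {"lr": {"type": "float", "range": [0.001, 0.1]}}})
assert cfg.hyperparams.lr.type == "float"
assert cfg.hyperparams.lr.range == [0.001, 0.1]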
content\n project_table = browser.find_element_by_css_selector(\"table.proj_info_cont\")\n grant_content = project_table.find_elements_by_css_selector(\"td\")\n\n link_details[\"abstract\"] = extract_text(grant_content[0].get_attribute(\"innerHTML\"))\n link_details[\"publichealth\"] = extract_text(grant_content[1].get_attribute(\"innerHTML\"))\n link_details[\"terms\"] = extract_text(grant_content[2].get_attribute(\"innerHTML\"))\n\n # full_content = browser.find_element_by_css_selector(\".project_res\")\n # html = full_content.get_attribute(\"innerHTML\")\n # soup = BeautifulSoup(html, 'html.parser')\n\n # link_details[\"full_content\"] = soup.get_text().strip().replace(\" \", \" \")\n\n link_details['@search.action'] = 'upload'\n\n content_list.append(link_details)\n\n return content_list\n\nif __name__ == \"__main__\":\n # Initialize browser for web scraping\n browser = webdriver.Firefox()\n\n # Scraping pages and collecting data into content_list\n content_list = []\n for page in range(4,10):\n URL_PARENT = 'https://report.nih.gov/award/index.cfm?ot=&fy=2018&state=&ic=&fm=&orgid=&distr=&rfa=&om=n&pid=&view=data&pagenum={}&sortcol=pn&sortdir=asc#tab5'.format(page)\n browser.get(URL_PARENT)\n\n table_links = browser.find_elements_by_css_selector(\"table.res_cont tbody tr td a.tablelink\")\n\n links = [link.get_attribute(\"href\") for link in table_links]\n try:\n content_list = content_list + get_contents_of_links(links, browser)\n except Exception as e:\n temp_content_in_json = json.dumps({'value':content_list}, ensure_ascii=False,indent=2)\n output_name = \"CRASH_upload_{}.json\".format(datetime.now().strftime(\"%Y%M%d%H%S\"))\n with open(os.path.join(\".\",\"data\",\"nih\",output_name), 'w') as f:\n json.dump(temp_content_in_json, f)\n\n\n browser.quit()\n\n # Uploading to Index\n content_url = ''.join([URL, '/indexes/',INDEX_NAME,'/docs/index?api-version=',API])\n\n content_in_json = json.dumps({'value':content_list}, ensure_ascii=False,indent=2)\n\n output_name = \"upload_{}.json\".format(datetime.now().strftime(\"%Y%m%d%H%S\"))\n with open(os.path.join(\".\",\"data\",\"nih\",output_name), 'w') as f:\n json.dump(content_in_json, f)\n\n post_value = requests.post(content_url, headers=headers, data = content_in_json)\n\n print('Status Code: {}'.format(post_value.status_code))\n print(post_value.json())\n\n\n\n","sub_path":"search/nih/src/scrape_nih.py","file_name":"scrape_nih.py","file_ext":"py","file_size_in_byte":4235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"183598782","text":"from ALEFramework.LeggedPredator import LeggedPredator\nclass Chicken(LeggedPredator):\n def __init__(self):\n mdNames = ['leg1', 'leg2', 'leg3', 'leg4']\n turnSpeed = 1.0\n forSpeed = 1.0\n objName = 'chicken'\n self.food = ['grain', 'grain1', 'grain2', 'grain3']\n super().__init__(mdNames, forSpeed, objName)\n self.legs = self.getMotorDevices()\n self.infPos = float('inf')\n self.multiMoveMotorPos(self.legs, self.infPos)\n self.setMultiMotorVel(self.legs, 1)\n self.setMaxEnergy(100000)\n self.setEnergy(100000)\n self.setConsumptionEnergy(10000)\n self.setFocusAngle(0.1)\n \n def behaviour(self):\n while self.robot.step(self.timestep) != -1:\n self.energy = self.energy -1\n if self.energy > 600000:\n self.moveForward()\n isObstacle = self.checkObstacle()\n \n if isObstacle:\n self.avoidObstacle(isObstacle)\n else:\n self.predBehaviour()\n collided = self.checkEnergyCollision(self.food)\n if collided:\n self.eat(collided)\n \nchicken = 
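On failure the scraper below checkpoints its partial results, but it json.dumps the list to a string and then json.dump's that string again, so the file ends up holding one JSON encoding nested inside another. Dumping the dict directly writes the intended document (a sketch of the rescue helper; the file naming is illustrative):

import json
from datetime import datetime

def checkpoint(records, prefix="CRASH_upload"):
    name = "{}_{}.json".format(prefix, datetime.now().strftime("%Y%m%d%H%M%S"))
    with open(name, "w") as f:
        json.dump({"value": records}, f, ensure_ascii=False, indent=2)
    return name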
Chicken()\nchicken.behaviour()","sub_path":"ALE_Example1/Controllers_For_Example/Chicken_Controller/Chicken_Controller.py","file_name":"Chicken_Controller.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"465286363","text":"import time\r\nimport pygame\r\nimport random\r\nimport math\r\nfrom .gameobject import GameObject\r\n\r\nclass CircleEmitter(GameObject):\r\n def __init__(self, name, position, lifetime, amount, bursts, rate):\r\n super().__init__(name, position, (0,0))\r\n self.position = position\r\n self.lifetime = lifetime\r\n self.amount = amount\r\n self.bursts = bursts\r\n self.created_particles = 0\r\n self.rate = rate\r\n self.particles = []\r\n self.can_emit = True\r\n self.last_emit = 0\r\n self.dt = 1/60\r\n self.base_color = (255, 255, 255)\r\n self.base_size = 6\r\n self.base_lifetime = 10\r\n\r\n def emit(self):\r\n if self.created_particles >= self.bursts and self.bursts!=-1:\r\n self.can_emit = False\r\n if self.can_emit:\r\n self.can_emit = False\r\n for i in range(self.amount):\r\n self.particles.append(Particle(\r\n self.position,\r\n self.base_color,\r\n self.base_lifetime,\r\n self.base_size + random.uniform(-1, 1)\r\n ))\r\n self.created_particles += 1\r\n self.last_emit = time.time()\r\n\r\n def draw(self, surface):\r\n if self.lifetime >= 0 and self.lifetime != -1:\r\n self.lifetime -= self.dt\r\n self.emit()\r\n if not self.can_emit and (time.time() > self.last_emit + self.rate):\r\n self.can_emit = True\r\n for particle in self.particles:\r\n particle.update(self.dt)\r\n particle.draw(surface)\r\n if particle.lifetime <= 0:\r\n self.particles.remove(particle)\r\n\r\n def is_emitting(self):\r\n if self.lifetime == -1 or self.lifetime>0:\r\n return True\r\n return False\r\n \r\nclass Particle():\r\n def __init__(self, position, color, base_lifetime, size):\r\n self.position = position\r\n self.color = color\r\n self.base_lifetime = base_lifetime\r\n self.lifetime = base_lifetime\r\n self.size = size\r\n\r\n def update(self, dt):\r\n if self.lifetime >= 0:\r\n self.lifetime -= dt\r\n lifetime_factor = self.lifetime/self.base_lifetime\r\n if(lifetime_factor>=0):\r\n self.size *= 1+lifetime_factor*.25\r\n else : self.lifetime = 0\r\n \r\n if(self.size > 256 ):\r\n self.lifetime = 0\r\n\r\n def draw(self, surface):\r\n pygame.draw.circle(surface, self.color, self.position, int(self.size), 1)","sub_path":"utils/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"300456788","text":"from helpers import alphabet_position, rotate_character\n\ndef encrypt(text,rot):\n encrypted = \"\"\n for ch in text:\n encrypted += rotate_character(ch,rot)\n return encrypted\ndef main():\n text = input(\"Type a message:\")\n rot = int(input(\"Rotate by:\"))\n print(encrypt(text,rot))\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"crypto/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"610683526","text":"\"\"\"\nYo! Twerug van weggeweest. Als het gaat om je innerlijke stem te volgen\nHeb ik nog wel een probleem (of uitdaging): ik hoor niks! 
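caesar.py above imports rotate_character from a local helpers module that is not shown; assuming the conventional behavior (letters rotate within their case, everything else passes through), a self-contained version looks like:

def rotate_character(ch, rot):
    if ch.isupper():
        base = ord('A')
    elif ch.islower():
        base = ord('a')
    else:
        return ch  # digits, spaces and punctuation are left as-is
    return chr((ord(ch) - base + rot) % 26 + base)

assert rotate_character('z', 1) == 'a'
assert rotate_character('A', 3) == 'D'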
Maar voor nu wil\nik heel graag memoization snappen en dat ook kunnen toepassen.\nHier is een stuk code van de bepsait pythpn-course.eu (/python3_memoization.php)\n#-------------------------------------------------------------------------------\n# DJO202103210824\n# via https://www.python-course.eu/python3_memoization.php\n# https://pythoncursus.nl/decorators-python/\n# https://stackoverflow.com/questions/739654/how-to-make-function-decorators-and-chain-them-together/1594484#1594484\n#---\n# Takeaway: je pakt de functie in in de decorator.\n# Met de hand ziet dat er zo uit.\n# Er spelen 2 dingen: decorators, dat zijn callables die een callable teruggeven en\n# Memoization, een pattern om functie uitkomsten te hergebruiken zodat op \n#---\n\"\"\"\ndef memoize(fe):\n memo = {}\n def helper(x):\n if x not in memo: \n memo[x] = fe(x)\n return memo[x]\n return helper\n \n\n@memoize\ndef fib(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fib(n-1) + fib(n-2)\n\n# Dit is het feitelijke inpakken, en het doet me denken aan formele en actuele parameters\n# ik heb er ff memoiz van gemaakt om te checken dat memoize geen reserved word is\n# so to speak. En dat is het niet (mag dus gewoon memoize zijn)\n# Ziehier de kleuternotatie (zonder (@decorator) (ff commenten en uncommenten enzo)\n# fib = memoize(fib)\n# Let u ook even op r.22 return helper!\n#ik probeer \nprint(fib(40))\n\n","sub_path":"20200316_Python/Codewars/20210318_Memoization 1.py","file_name":"20210318_Memoization 1.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"445457322","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\nimport IPython\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://nc:kegerator1234@localhost:3306/kegerator'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\ndb = SQLAlchemy(app)\n\nclass Batch(db.Model):\n __tablename__ = 'batches'\n id = db.Column(db.Integer, primary_key=True)\n created_at = db.Column(db.DateTime)\n current = db.Column(db.Boolean)\n beer_id = db.Column(db.Integer, db.ForeignKey('beers.id'))\n keg_id = db.Column(db.Integer, db.ForeignKey('kegs.id'))\n \n def __init__(self, beer, keg, current):\n self.current = current\n self.beer_id = beer.id\n self.keg_id = keg.id\n self.created_at = datetime.utcnow()\n \n def to_json(self):\n all_pours = db.session.query(Pour).filter_by(batch_id=self.id).all()\n volume_poured = sum([pour.volume_poured for pour in all_pours])\n volume_left = Keg.query.filter_by(id=self.keg_id).first().total_volume - volume_poured\n return {\n 'id': self.id,\n 'created_at': self.created_at.strftime(\"%Y-%m-%dT%T.620Z\"),\n 'current': self.current,\n 'beer': Beer.query.filter_by(id=self.beer_id).first().to_json(),\n 'keg': Keg.query.filter_by(id=self.keg_id).first().to_json(),\n 'pours': [ pour.to_json() for pour in all_pours ],\n 'volume_poured': volume_poured,\n 'volume_remaining': volume_left\n }\n\nclass Beer(db.Model):\n __tablename__ = 'beers'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(32), unique=True)\n batches = db.relationship('Batch', backref='beer', lazy='dynamic')\n \n def __init__(self, name):\n self.name = name\n \n def to_json(self):\n return {\n 'id': self.id,\n 'name': self.name\n }\n \nclass Keg(db.Model):\n __tablename__ = 'kegs'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(32), unique=True)\n 
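Batch.to_json above loads every Pour row into Python just to add up volume_poured; SQLAlchemy can do the aggregation in the database instead (a sketch against the same models):

from sqlalchemy import func

def volume_poured_for(batch_id):
    total = db.session.query(func.sum(Pour.volume_poured)) \
        .filter(Pour.batch_id == batch_id).scalar()
    return total or 0.0  # scalar() is None for a batch with no pours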
total_volume = db.Column(db.Float)\n units = db.Column(db.String(32))\n batches = db.relationship('Batch', backref='keg', lazy='dynamic')\n \n def __init__(self, name, total_volume, units):\n self.name = name\n self.total_volume = total_volume\n self.units = units\n \n def to_json(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'total_volume': self.total_volume,\n 'units': self.units\n }\n \nclass Pour(db.Model):\n __tablename__ = 'pours'\n id = db.Column(db.Integer, primary_key=True)\n created_at = db.Column(db.DateTime)\n volume_poured = db.Column(db.Float)\n batch_id = db.Column(db.Integer, db.ForeignKey('batches.id'))\n \n def __init__(self, batch, volume):\n self.batch_id = batch.id\n self.volume_poured = volume\n self.created_at = datetime.utcnow()\n \n def to_json(self):\n return {\n\t 'created_at': self.created_at.strftime(\"%Y-%m-%dT%T.620Z\"),\n 'volume_poured': self.volume_poured\n }\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"296912770","text":"import numpy as np\nfrom layers.dataset import cifar100\nimport matplotlib.pyplot as plt\n\n# Please make sure that cifar-100-python is present in the same folder as dataset.py\n\n(x_train, y_train), (x_test, y_test) = cifar100(1212356299)\n\nfrom layers import (FullLayer , ReluLayer , SoftMaxLayer ,CrossEntropyLayer , Sequential)\nmodel = Sequential(layers =( FullLayer ( 3072 , 500) , ReluLayer(), FullLayer (500,4) , SoftMaxLayer() ) , loss=CrossEntropyLayer())\n\nlr_accuracies = np.zeros((3,))\n\n\nloss1 = model.fit(x_train, y_train, lr = 0.01, epochs=15)\ny_predict = model.predict(x_test)\n\ncount = 0\nfor i in range(np.size(y_test)):\n if y_predict[i] == y_test[i]:\n count += 1\n\nlr_accuracies[0] = (100.0*count)/np.shape(y_predict)[0]\n\n\nloss2 = model.fit(x_train, y_train, lr = 0.1, epochs=15)\n\ny_predict = model.predict(x_test)\n\ncount = 0\nfor i in range(np.size(y_test)):\n if y_predict[i] == y_test[i]:\n count += 1\n\nlr_accuracies[1] = (100.0*count)/np.shape(y_predict)[0]\n\nloss3 = model.fit(x_train, y_train, lr = 10, epochs=15)\n\ny_predict = model.predict(x_test)\n\ncount = 0\nfor i in range(np.size(y_test)):\n if y_predict[i] == y_test[i]:\n count += 1\n\nlr_accuracies[2] = (100.0*count)/np.shape(y_predict)[0]\n\nplt.figure(1)\n\nplt.plot(range(1,16), loss1, label='Loss for lr = 0.01')\nplt.plot(range(1,16), loss2, label='Loss for lr = 0.1')\n#plt.plot(range(1,16), loss3)\nplt.xlabel(\"Epochs\")\nplt.ylabel(\"Loss\")\nplt.title(\"Loss comparisons for different learning rate\")\n#plt.title(\"Loss for lr = 10\")\nplt.legend()\nplt.show()\n\n\nlr = [\"0.01\", \"0.1\", \"10\"]\n\nplt.figure(2)\n\n#plt.plot(lr,\"ro\", lr_accuracies, label='Accuracies')\nplt.plot([0, lr_accuracies[0], lr_accuracies[1], lr_accuracies[2], 0], \"ro\")\nplt.xticks(range(5), [\"0\", \"0.01\", \"0.1\", \"10\", \"100\"])\nplt.xlabel(\"Learning rates\")\nplt.ylabel(\"Accuracy (%)\")\nplt.title(\"Accuracy comparison for different learning rate\")\nplt.legend()\nplt.show()\n","sub_path":"EEE598/ANIK_JHA_LAB3/layers/epoch_loss.py","file_name":"epoch_loss.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"580182638","text":"import logging\n\nfrom autolamella.acquire import grab_ion_image\nimport autolamella.autoscript\nfrom autolamella.user_input import ask_user\nfrom autolamella.sample import 
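The accuracy loops in epoch_loss.py above count matches element by element; numpy compares whole arrays at once, which reads as the formula it implements (toy arrays):

import numpy as np

y_test = np.array([0, 1, 2, 3, 1])
y_predict = np.array([0, 1, 1, 3, 1])

accuracy = 100.0 * np.mean(y_predict == y_test)
print(accuracy)  # 80.0 -> four of five predictions match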
Lamella\n\n\ndef add_samples(microscope, settings):\n \"\"\"Interactive function to add samples to list.\n\n Parameters\n ----------\n microscope : Autoscript microscope object.\n settings : Dictionary of user input argument settings.\n\n Returns\n -------\n samples\n List of FIB-SEM sample objects.\n \"\"\"\n autolamella.autoscript.reset_state(microscope, settings)\n default_response_yes = [\"\", \"yes\", \"y\"]\n response_no = [\"no\", \"n\"]\n\n samples = []\n user_response = \"\"\n while user_response.lower() not in response_no:\n message = (\n f\"\"\"Move to the desired location. {len(samples)} locations selected so far.\n Do you want to select this location for milling? [y]/n\n \"\"\")\n user_response = input(message)\n if user_response.lower() in default_response_yes:\n my_sample = add_single_sample(microscope, settings)\n samples.append(my_sample)\n samples = [s for s in samples if s is not None]\n return samples\n\n\ndef add_single_sample(microscope, settings):\n \"\"\"Create a single lamella object.\n\n Parameters\n ----------\n microscope : Autoscript microscope object.\n settings : Dictionary of user input argument settings.\n\n Returns\n -------\n my_lamella\n A single Lamella() object.\n \"\"\"\n from autoscript_core.common import ApplicationServerException\n from autoscript_sdb_microscope_client.structures import (GrabFrameSettings,\n Rectangle)\n\n autolamella.autoscript.reset_state(microscope, settings)\n demo_mode = settings[\"demo_mode\"]\n acquire_many_images = settings[\"imaging\"][\"full_field_ib_images\"]\n # Reset microscope state\n autolamella.autoscript.reset_state(microscope, settings)\n microscope.beams.ion_beam.beam_current.value = settings[\"lamella\"][\n \"milling_current\"\n ]\n # Optional autocontrast\n if settings[\"imaging\"][\"autocontrast\"]:\n microscope.imaging.set_active_view(2) # the ion beam view\n autolamella.acquire.autocontrast(microscope)\n # Take full field image\n full_field_camera_settings = autolamella.acquire.create_camera_settings(\n settings[\"imaging\"], reduced_area=Rectangle(0, 0, 1, 1)\n )\n original_image = grab_ion_image(microscope, full_field_camera_settings)\n # Select fiducial posiion\n print(\"Please select where to put a fiducial marker.\")\n my_fiducial = autolamella.fiducial.fiducial(\n microscope,\n original_image,\n settings[\"fiducial\"][\"fiducial_length\"],\n settings[\"fiducial\"][\"fiducial_width\"],\n settings[\"fiducial\"][\"fiducial_image_size_x\"],\n settings[\"fiducial\"][\"fiducial_image_size_y\"],\n settings[\"fiducial\"][\"fiducial_milling_depth\"],\n )\n if my_fiducial is None:\n print(\"No fiducial location selected, cancelling.\")\n microscope.patterning.clear_patterns()\n return\n #\n fiducial_coord_realspace, fiducial_coord_relative, fiducial_coord_pixels = (\n my_fiducial\n )\n pixelsize_x = original_image.metadata.binary_result.pixel_size.x\n fiducial_image_relative_size = [\n settings[\"fiducial\"][\"fiducial_image_size_x\"]\n / (original_image.width * pixelsize_x),\n settings[\"fiducial\"][\"fiducial_image_size_y\"]\n / (original_image.height * pixelsize_x),\n ]\n reduced_area_fiducial = autolamella.fiducial.fiducial_reduced_area_rect(\n fiducial_coord_relative, fiducial_image_relative_size\n )\n camera_settings = autolamella.acquire.create_camera_settings(\n settings[\"imaging\"], reduced_area=reduced_area_fiducial\n )\n cropped_original_image = grab_ion_image(microscope, camera_settings)\n my_lamella = Lamella(microscope)\n my_lamella.fiducial_image_relative_size = fiducial_image_relative_size\n 
my_fiducial = my_lamella.set_fiducial(\n cropped_original_image,\n fiducial_coord_realspace,\n fiducial_coord_relative,\n fiducial_coord_pixels,\n reduced_area_fiducial,\n )\n # Select the lamella position\n print(\"Please select the center point of your lamella.\")\n my_lamella.original_image = original_image\n lamella_center = my_lamella.set_center(original_image, settings)\n if lamella_center == []:\n print(\"No lamella position selected, cancelling.\")\n microscope.patterning.clear_patterns()\n return\n # Ask user for decision\n message = \"Are you happy with this position? [y]/n\\n\"\n if ask_user(message, default=\"yes\"):\n message = \"Do you want to mill a fiducial marker here? [y]/n\\n\"\n if ask_user(message, default=\"yes\"):\n print(\"Milling fiducial marker...\")\n if not demo_mode:\n microscope.beams.ion_beam.beam_current.value = settings[\"fiducial\"][\n \"fiducial_milling_current\"\n ]\n microscope.imaging.set_active_view(2) # the ion beam view\n try:\n microscope.patterning.run()\n except ApplicationServerException:\n logging.error(\"ApplicationServerException: could not mill!\")\n microscope.patterning.clear_patterns()\n return # returns None which gets stripped from sample list\n if acquire_many_images:\n full_field_camera_settings = GrabFrameSettings(\n reduced_area=Rectangle(0, 0, 1, 1),\n resolution=settings[\"imaging\"][\"resolution\"],\n dwell_time=settings[\"imaging\"][\"dwell_time\"],\n )\n microscope.auto_functions.run_auto_cb()\n reference_image = grab_ion_image(microscope, full_field_camera_settings)\n my_lamella.reference_image = reference_image\n camera_settings = GrabFrameSettings(\n reduced_area=reduced_area_fiducial,\n resolution=settings[\"fiducial\"][\"reduced_area_resolution\"],\n dwell_time=settings[\"imaging\"][\"dwell_time\"],\n )\n cropped_reference_image = grab_ion_image(microscope, camera_settings)\n message = \"Do you want to re-mill the fiducial marker? 
y/[n]\\n\"\n if ask_user(message, default=\"no\"):\n print(\"Milling fiducial marker again...\")\n if not demo_mode:\n microscope.imaging.set_active_view(2) # the ion beam view\n try:\n microscope.patterning.run()\n except ApplicationServerException:\n logging.error(\"ApplicationServerException: could not mill\")\n microscope.patterning.clear_patterns()\n return # returns None which gets stripped from sample list\n if acquire_many_images:\n full_field_camera_settings = GrabFrameSettings(\n reduced_area=Rectangle(0, 0, 1, 1),\n resolution=settings[\"imaging\"][\"resolution\"],\n dwell_time=settings[\"imaging\"][\"dwell_time\"],\n )\n microscope.auto_functions.run_auto_cb()\n reference_image = grab_ion_image(microscope, full_field_camera_settings)\n my_lamella.reference_image = reference_image\n microscope.patterning.clear_patterns()\n else:\n print(\"Ok, deleting those milling patterns.\")\n microscope.patterning.clear_patterns()\n return # returns None, which gets stripped from sample list later\n # Continue on\n camera_settings = GrabFrameSettings(\n reduced_area=reduced_area_fiducial,\n resolution=settings[\"fiducial\"][\"reduced_area_resolution\"],\n dwell_time=settings[\"imaging\"][\"dwell_time\"],\n )\n cropped_reference_image = grab_ion_image(microscope, camera_settings)\n my_lamella.set_fiducial(\n cropped_reference_image,\n fiducial_coord_realspace,\n fiducial_coord_relative,\n fiducial_coord_pixels,\n reduced_area_fiducial,\n )\n if not acquire_many_images:\n my_lamella.reference_image = cropped_reference_image\n my_lamella.set_sem_image(microscope, settings)\n my_lamella.set_custom_milling_depth()\n return my_lamella\n","sub_path":"autolamella/add_samples.py","file_name":"add_samples.py","file_ext":"py","file_size_in_byte":8465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"474394914","text":"\"\"\"\n:Copyright: 2006-2019 Jochen Kupperschmidt\n:License: Modified BSD, see LICENSE for details.\n\"\"\"\n\nfrom tests.base import AbstractAppTestCase\nfrom tests.helpers import create_brand, create_email_config, create_party, \\\n create_site, create_user, http_client\n\n\nclass UserProfileTest(AbstractAppTestCase):\n\n def setUp(self):\n super().setUp()\n\n brand = create_brand()\n party = create_party(brand.id)\n create_email_config()\n create_site(party.id)\n\n self.user = create_user()\n\n def test_view_profile(self):\n url = '/users/{}'.format(self.user.id)\n\n with http_client(self.app) as client:\n response = client.get(url)\n\n assert response.status_code == 200\n assert response.mimetype == 'text/html'\n","sub_path":"tests/blueprints/user/test_views_user_profile.py","file_name":"test_views_user_profile.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"417791070","text":"import logging\nfrom collections import OrderedDict\nfrom dataclasses import dataclass, field\nfrom typing import Dict, Type\n\nimport requests\nfrom requests.exceptions import HTTPError\n\nfrom datahub.configuration.common import ConfigModel\nfrom datahub.ingestion.api.common import RecordEnvelope, WorkUnit\nfrom datahub.ingestion.api.sink import Sink, SinkReport, WriteCallback\nfrom datahub.metadata import ( # MLFeatureSnapshotClass,\n ChartSnapshotClass,\n CorpGroupSnapshotClass,\n CorpUserSnapshotClass,\n DashboardSnapshotClass,\n DataProcessSnapshotClass,\n DatasetSnapshotClass,\n MLModelSnapshotClass,\n)\nfrom 
datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent\n\nlogger = logging.getLogger(__name__)\n\nresource_locator: Dict[Type[object], str] = {\n ChartSnapshotClass: \"charts\",\n DashboardSnapshotClass: \"dashboards\",\n CorpUserSnapshotClass: \"corpUsers\",\n CorpGroupSnapshotClass: \"corpGroups\",\n DatasetSnapshotClass: \"datasets\",\n DataProcessSnapshotClass: \"dataProcesses\",\n MLModelSnapshotClass: \"mlModels\",\n}\n\n\ndef _rest_li_ify(obj):\n if isinstance(obj, (dict, OrderedDict)):\n if len(obj.keys()) == 1:\n key = list(obj.keys())[0]\n value = obj[key]\n if key.find(\"com.linkedin.pegasus2avro.\") >= 0:\n new_key = key.replace(\"com.linkedin.pegasus2avro.\", \"com.linkedin.\")\n return {new_key: _rest_li_ify(value)}\n elif key == \"string\" or key == \"array\":\n return value\n\n new_obj = {}\n for key, value in obj.items():\n if value is not None:\n new_obj[key] = _rest_li_ify(value)\n return new_obj\n elif isinstance(obj, list):\n new_obj = [_rest_li_ify(item) for item in obj]\n return new_obj\n return obj\n\n\nclass DatahubRestSinkConfig(ConfigModel):\n \"\"\"Configuration class for holding connectivity to datahub gms\"\"\"\n\n server: str = \"http://localhost:8080\"\n\n\n@dataclass\nclass DatahubRestSink(Sink):\n config: DatahubRestSinkConfig\n report: SinkReport = field(default_factory=SinkReport)\n\n @classmethod\n def create(cls, config_dict, ctx):\n config = DatahubRestSinkConfig.parse_obj(config_dict)\n return cls(ctx, config)\n\n def get_ingest_endpoint(self, mce: MetadataChangeEvent):\n snapshot_type = type(mce.proposedSnapshot)\n snapshot_resource = resource_locator.get(snapshot_type, None)\n if not snapshot_resource:\n raise ValueError(\n f\"Failed to locate a snapshot resource for type {snapshot_type}\"\n )\n\n return f\"{self.config.server}/{snapshot_resource}?action=ingest\"\n\n def handle_work_unit_start(self, workunit: WorkUnit) -> None:\n pass\n\n def handle_work_unit_end(self, workunit: WorkUnit) -> None:\n pass\n\n def write_record_async(\n self,\n record_envelope: RecordEnvelope[MetadataChangeEvent],\n write_callback: WriteCallback,\n ):\n headers = {\"X-RestLi-Protocol-Version\": \"2.0.0\"}\n\n mce = record_envelope.record\n url = self.get_ingest_endpoint(mce)\n\n raw_mce_obj = mce.proposedSnapshot.to_obj()\n\n mce_obj = _rest_li_ify(raw_mce_obj)\n snapshot = {\"snapshot\": mce_obj}\n try:\n response = requests.post(url, headers=headers, json=snapshot)\n # with open('data.json', 'w') as outfile:\n # json.dump(serialized_snapshot, outfile)\n response.raise_for_status()\n self.report.report_record_written(record_envelope)\n write_callback.on_success(record_envelope, {})\n except HTTPError as e:\n info = response.json()\n self.report.report_failure({\"e\": e, \"info\": info})\n write_callback.on_failure(record_envelope, e, info)\n except Exception as e:\n self.report.report_failure({\"e\": e})\n write_callback.on_failure(record_envelope, e, {})\n\n def get_report(self) -> SinkReport:\n return self.report\n\n def close(self):\n pass\n","sub_path":"metadata-ingestion/src/datahub/ingestion/sink/datahub_rest.py","file_name":"datahub_rest.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"566858852","text":"from luma.core.interface.serial import spi, noop\nfrom luma.core.render import canvas\nfrom luma.core.virtual import viewport\nfrom luma.led_matrix.device import max7219\nfrom luma.core.legacy.font import proportional, LCD_FONT\nfrom 
luma.core.legacy import text\nimport time\nfrom PIL import Image, ImageDraw \n\n\nclass MyMatrix:\n\n def __init__(self, cascaded=1):\n serial = spi(port=0, device=0, gpio=noop())\n self.device = max7219(serial, cascaded=cascaded)\n self.test = 'Hello'\n self.pixelList = list()\n \n def letter(self, letter, matrix=1):\n with canvas(self.device) as draw:\n text(draw, (((matrix-1)*8), 0), letter, fill=\"white\", font=proportional(LCD_FONT))\n \n def setPixel(self, x, y):\n with canvas(self.device) as draw:\n draw.point((x,y), fill=\"white\")\n\n def showPixels(self):\n with canvas(self.device) as draw:\n draw.point(tuple(self.pixelList), fill=\"white\")\n\n def pixel(self, x, y, state):\n if state == True or state == 1:\n if (x,y) in self.pixelList:\n self.pixelList[self.pixelList.index((x,y))] = (x,y)\n else:\n self.pixelList.append((x,y))\n else:\n if (x,y) in self.pixelList:\n self.pixelList.remove((x,y))\n self.showPixels()\n\n def hello(self):\n return self.test\n\n def showMessage(self, textString, sleepTime = 0.1):\n print(self.device.width)\n #self.device.width\n for i in range((len(textString)*6)):\n with canvas(self.device) as draw:\n text(draw, (i*-1, 0), textString, fill=\"white\", font=proportional(LCD_FONT))\n time.sleep(sleepTime)\n\n def clear(self):\n self.pixelList = list()\n self.device.clear()\n \n def brightness(self, brightness):\n self.device.contrast(brightness)","sub_path":"examples/MyMax7219.py","file_name":"MyMax7219.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"423133275","text":"#from distutils.core import setup\n#from distutils.extension import Extension\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nimport subprocess\n\nimport os\nimport numpy\n\ntry:\n from Cython.Build import cythonize\n USE_CYTHON = True\nexcept ImportError:\n USE_CYTHON = False\n\nif not os.path.exists('grizli/utils_c/interp.pyx'):\n USE_CYTHON = False\n \nif USE_CYTHON:\n cext = '.pyx'\nelse:\n cext = '.c'\n\nprint('C extension: {0}'.format(cext))\n\nextensions = [\n Extension(\"grizli.utils_c.interp\", [\"grizli/utils_c/interp\"+cext],\n include_dirs = [numpy.get_include()],\n libraries=[\"m\"]),\n \n Extension(\"grizli.utils_c.disperse\", [\"grizli/utils_c/disperse\"+cext],\n include_dirs = [numpy.get_include()],\n libraries=[\"m\"]),\n\n]\n\n#update version\nargs = 'git describe --tags'\np = subprocess.Popen(args.split(), stdout=subprocess.PIPE)\nversion = p.communicate()[0].decode(\"utf-8\").strip()\n\n# version = \"0.8.0\"\n\nversion_str = \"\"\"# git describe --tags\n__version__ = \"{0}\"\\n\"\"\".format(version)\n\nfp = open('grizli/version.py','w')\nfp.write(version_str)\nfp.close()\nprint('Git version: {0}'.format(version))\n\nif USE_CYTHON:\n extensions = cythonize(extensions)\n\n# Utility function to read the README file.\n# Used for the long_description. 
It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name = \"grizli\",\n version = version,\n author = \"Gabriel Brammer\",\n author_email = \"gbrammer@gmail.com\",\n description = \"Grism redshift and line analysis software\",\n license = \"MIT\",\n url = \"https://github.com/gbrammer/grizli\",\n download_url = \"https://github.com/gbrammer/grizli/tarball/{0}\".format(version),\n packages=['grizli', 'grizli/pipeline', 'grizli/utils_c', 'grizli/tests', 'grizli/galfit'],\n # requires=['numpy', 'scipy', 'astropy', 'drizzlepac', 'stwcs'],\n # long_description=read('README.rst'),\n classifiers=[\n \"Development Status :: 1 - Planning\",\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Astronomy',\n ],\n ext_modules = extensions,\n package_data={'grizli': ['data/*', 'data/templates/*', 'data/templates/stars/*', 'data/templates/fsps/*']},\n # scripts=['grizli/scripts/flt_info.sh'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"336362619","text":"#Training on 5000 episodes\n\n# No baseline, undiscounted - tmux 8\n# No baseline, discounted - tmux 14\n# Baseline, undiscounted - tmux 10\n# Baseline, discounted - tmux 15\n\n# Notice their graphs, take a point where one converges the best and show test results at that point\n\n# Testing on let's say 1000 episodes of training\n\n# No baseline, undiscounted\n# No baseline, discounted\n# Baseline, undiscounted\n# Baseline, discounted\n\n#!/usr/bin/env python3\n\n\"\"\"\n__author__ = \"Craig Sherstan\"\n__copyright__ = \"Copyright 2019\"\n__credits__ = [\"Craig Sherstan\"]\n__email__ = \"sherstan@ualberta.ca\"\n\"\"\"\n\n\"\"\"\nYou are free to additional imports as needed... except please do not add any additional packages or dependencies to\nyour virtualenv other than those specified in requirements.txt. If I can't run it using the virtualenv I specified,\nwithout any additional installs, there will be a penalty.\n\nI've included a number of imports that I think you'll need.\n\"\"\"\nimport torch\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport gym\nfrom network import network_factory\nfrom network import PolicyNetwork\nfrom network import ValueNetwork\nimport argparse\nimport numpy as np\n\nfrom torch.utils.tensorboard import SummaryWriter\nimport os\nimport sys\nfrom torch import nn\nfrom torch import optim\nimport pickle\n\n\n# prevents type-3 fonts, which some conferences disallow.\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\n\n\ndef make_env():\n env = gym.make('CartPole-v1')\n return env\n\n\ndef sliding_window(data, N):\n \"\"\"\n For each index, k, in data we average over the window from k-N-1 to k. 
The beginning handles incomplete buffers,\n that is it only takes the average over what has actually been seen.\n :param data: A numpy array, length M\n :param N: The length of the sliding window.\n :return: A numpy array, length M, containing smoothed averaging.\n \"\"\"\n\n idx = 0\n window = np.zeros(N)\n smoothed = np.zeros(len(data))\n\n for i in range(len(data)):\n window[idx] = data[i]\n idx += 1\n\n smoothed[i] = window[0:idx].mean()\n\n if idx == N:\n window[0:-1] = window[1:]\n idx = N - 1\n\n return smoothed\n \n\ndef discount_returns(rewards, gamma):\n # Discounts rewards and stores their cumulative return in reverse\n '''\n r = np.array([gamma**i * rewards[i] for i in range(len(rewards))])\n r = r[::-1].cumsum()[::-1]\n return r\n '''\n \n r = rewards[::-1] #rewards in reverse\n G = [r[0]] #last rewards\n for i in range(1,len(r)):\n G.append(r[i] + gamma*G[-1])\n G = G[::-1]\n G = np.array(G)\n return G\n\n\ndef reinforce(env, policy_estimator, value_estimator, num_episodes, # value_estimator=None,\n batch_size, gamma):\n\n # Set up lists to hold results\n total_rewards = []\n batch_rewards = []\n batch_actions = []\n batch_states = []\n batch_counter = 0\n writer = SummaryWriter()\n \n # Define optimizer\n optimizer = optim.Adam(policy_estimator.network.parameters(), lr=0.0025)\n optimizer_v = optim.Adam(value_estimator.network_v.parameters(), lr=0.001)\n \n action_space = np.arange(env.action_space.n)\n flag = 1 # 1 for train, 0 for test\n for ep in range(num_episodes):\n s_0 = env.reset()\n states = []\n rewards = []\n actions = []\n complete = False\n t = 0\n while complete == False:\n t += 1\n \n # Gets reward and next state\n \n action = policy_estimator.get_action(s_0)\n s_1, r, complete, _ = env.step(action)\n \n states.append(s_0)\n rewards.append(r)\n actions.append(action)\n s_0 = s_1\n\n # Checks if episode is over\n \n if complete:\n \n batch_counter += 1\n batch_rewards.extend(discount_returns(rewards, gamma))\n batch_states.extend(states)\n batch_actions.extend(actions)\n \n total_rewards.append(sum(rewards))\n \n # Updates after batch of episodes, here batch is 1\n \n if batch_counter == batch_size:\n if flag == 1:\n \n # Value update\n \n state_tensor_v = torch.tensor(batch_states, dtype=torch.float32)\n reward_tensor_v = torch.tensor(batch_rewards, dtype=torch.float32)\n value_estimates = value_estimator.forward(state_tensor_v)\n loss_v = torch.mean((reward_tensor_v-value_estimates.view(1,-1)[0])**2)\n\n optimizer_v.zero_grad()\n loss_v.backward(retain_graph=True)\n optimizer_v.step() \n \n # Policy update\n \n state_tensor = torch.tensor(batch_states, dtype=torch.float32)\n reward_tensor = torch.tensor(batch_rewards, dtype=torch.float32)\n action_tensor = torch.tensor(batch_actions, dtype=torch.int32)\n loss = - (gamma**t) * torch.mean(policy_estimator.forward(state_tensor).log_prob(action_tensor)*(reward_tensor-value_estimates.view(1,-1)[0])) #-value_estimates\n #loss = - torch.mean(policy_estimator.forward(state_tensor).log_prob(action_tensor)*(reward_tensor))\n optimizer.zero_grad()\n loss.backward(retain_graph=True)\n optimizer.step()\n \n batch_rewards = []\n batch_actions = []\n batch_states = []\n batch_counter = 0\n \n print(\"Ep: {} Average of last 100: {:.2f}\".format(\n ep + 1, np.mean(total_rewards[-100:])))\n \n # Saves policy \n \n \n #if (ep + 1) % 10000 == 0:\n # torch.save(pe.network.state_dict(), 'saved_policies_cartpole1_50k/baseline/saved_network_'+ str(ep + 1) + '.pkl')\n '''\n if flag == 1:\n \n # Tensorboard plots\n \n 
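# logs the per-episode return, the policy/value/total losses, and the mean squared gradient of each network layer\n            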
writer.add_scalar('return', total_rewards[-1], ep) #discounted rewards with gamma = 1, hence undiscounted\n writer.add_scalar('loss/policy', loss, ep)\n writer.add_scalar('loss/value', loss_v, ep)\n writer.add_scalar('loss/total', 0.98*loss + 0.02*loss_v, ep)\n for name, params in zip(policy_estimator.network.state_dict().keys(), policy_estimator.network.parameters()):\n average_grad = torch.mean(params.grad**2)\n writer.add_scalar('gradient_policy/'+str(name), average_grad, ep)\n for name, params in zip(value_estimator.network_v.state_dict().keys(), value_estimator.network_v.parameters()):\n average_grad = torch.mean(params.grad**2)\n writer.add_scalar('gradient_value/'+str(name), average_grad, ep)\n ''' \n \n return total_rewards\n\nif __name__ == '__main__':\n\n \"\"\"\n You are free to add additional command line arguments, but please ensure that the script will still run with:\n python main.py --episodes 10000\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--episodes\", \"-e\", default=5000, type=int, help=\"Number of episodes to train for\")\n args = parser.parse_args()\n\n episodes = args.episodes\n\n \"\"\"\n It is unlikely that the GPU will help in this instance (since the size of individual operations is small) - in fact \n there's a good chance it could slow things down because we have to move data back and forth between CPU and GPU.\n Regardless I'm leaving this in here. For those of you with GPUs this will mean that you will need to move your \n tensors to GPU.\n \"\"\"\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n numrun = 50\n \n for run in range(numrun):\n env = make_env()\n\n in_size = env.observation_space.shape[0]\n num_actions = env.action_space.n\n \n gamma = 1.0\n batch_size = 1\n network = network_factory(in_size, num_actions, env)\n network.to(device)\n pe = PolicyNetwork(network)\n \n # Load policy to test\n #pe.network.load_state_dict(torch.load('saved_network_50000_baseline.pkl'))\n ve = ValueNetwork(in_size)\n ep_returns = reinforce(env, pe, ve, episodes, batch_size, gamma) #,ve , loss_policy, loss_value\n \n fwrite = open('runs_data_cartpole1_50runs/training/baseline/'+str(run)+'.pkl','wb')\n pickle.dump(ep_returns, fwrite)\n fwrite.close()\n \n \n \n #window = 10\n #plt.figure(figsize=(12,8))\n #plt.plot(sliding_window(ep_returns, window))\n #plt.title(\"Episode Return\")\n #plt.xlabel(\"Episode\")\n #plt.ylabel(\"Average Return (Sliding Window 10)\")\n #plt.show()\n\n\n\n # save your network\n #torch.save(pe.network.state_dict(), 'saved_network_50000.pkl')\n \n","sub_path":"652/Assignments/Assignment2/main_baseline_training.py","file_name":"main_baseline_training.py","file_ext":"py","file_size_in_byte":9392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"15430191","text":"def quizzes():\n _, X = map(int, input().split())\n S = input()\n\n for i in S:\n if i == 'o':\n X += 1\n else:\n X = X-1 if X > 0 else 0\n\n print(X)\n\n\nif __name__ == \"__main__\":\n quizzes()\n","sub_path":"AtCoderBeginners Selection/184/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"107072199","text":"from enum import Enum\n\nfrom .command import Command\nfrom .fields import Enum as EnumField, Int, Bool, Str, Time, DateTime, Bitmask\nfrom .utils import parse_enum_bitmask\n\n\nclass SERIAL_NUMBER(Command):\n CMD = 0x0B\n GET, SET = True, False\n DATA = 
[Str('SERIAL_NUMBER')]\n\n\nclass SOFTWARE_VERSION(Command):\n CMD = 0x0E\n GET, SET = True, False\n DATA = [Str('SOFTWARE_VERSION')]\n\n\nclass MODEL_NUMBER(Command):\n CMD = 0x10\n GET, SET = True, False\n\n class MODEL_SPECIES(Enum):\n PDP = 0x01\n LCD = 0x02\n DLP = 0x03\n LED = 0x04\n CRT = 0x05\n OLED = 0x06\n\n class TV_SUPPORT(Enum):\n SUPPORTED = 0x00\n NOT_SUPPORTED = 0x01\n\n # NOTE: Actually there is list of MODEL_CODE in specification,\n # but it's TOO long, and it's TOO old (newer models gets new code)\n DATA = [MODEL_SPECIES, Int('MODEL_CODE'), TV_SUPPORT]\n\n\nclass POWER(Command):\n CMD = 0x11\n GET, SET = True, True\n\n class POWER_STATE(Enum):\n OFF = 0x00\n ON = 0x01\n REBOOT = 0x02\n\n DATA = [POWER_STATE]\n\n\nclass VOLUME(Command):\n CMD = 0x12\n GET, SET = True, True\n VOLUME = Int('VOLUME', range(101))\n DATA = [VOLUME]\n\n\nclass MUTE(Command):\n CMD = 0x13\n GET, SET = True, True\n\n class MUTE_STATE(Enum):\n OFF = 0x00\n ON = 0x01\n\n DATA = [MUTE_STATE]\n\n\nclass INPUT_SOURCE(Command):\n CMD = 0x14\n GET, SET = True, True\n\n class INPUT_SOURCE_STATE(Enum):\n S_VIDEO = 0x04\n COMPONENT = 0x08\n AV = 0x0C\n AV2 = 0x0D\n SCART1 = 0x0E\n DVI = 0x18\n PC = 0x14\n BNC = 0x1E\n DVI_VIDEO = 0x1F\n MAGIC_INFO = 0x20\n HDMI1 = 0x21\n HDMI1_PC = 0x22\n HDMI2 = 0x23\n HDMI2_PC = 0x24\n DISPLAY_PORT_1 = 0x25\n DISPLAY_PORT_2 = 0x26\n DISPLAY_PORT_3 = 0x27\n RF_TV = 0x30\n HDMI3 = 0x31\n HDMI3_PC = 0x32\n HDMI4 = 0x33\n HDMI4_PC = 0x34\n TV_DTV = 0x40\n PLUG_IN_MODE = 0x50\n HD_BASE_T = 0x55\n MEDIA_MAGIC_INFO_S = 0x60\n WIDI_SCREEN_MIRRORING = 0x61\n INTERNAL_USB = 0x62\n URL_LAUNCHER = 0x63\n IWB = 0x64\n\n DATA = [INPUT_SOURCE_STATE]\n\n\nclass PICTURE_ASPECT(Command):\n CMD = 0x15\n GET, SET = True, True\n\n class PICTURE_ASPECT_STATE(Enum):\n PC_16_9 = 0x10\n PC_4_3 = 0x18\n PC_ORIGINAL_RATIO = 0x20\n PC_21_9 = 0x21\n\n VIDEO_AUTO_WIDE = 0x00\n VIDEO_16_9 = 0x01\n VIDEO_ZOOM = 0x04\n VIDEO_ZOOM_1 = 0x05\n VIDEO_ZOOM_2 = 0x06\n VIDEO_SCREEN_FIT = 0x09\n VIDEO_4_3 = 0x0B\n VIDEO_WIDE_FIT = 0x0C\n VIDEO_CUSTOM = 0x0D\n VIDEO_SMART_VIEW_1 = 0x0E\n VIDEO_SMART_VIEW_2 = 0x0F\n VIDEO_WIDE_ZOOM = 0x31\n VIDEO_21_9 = 0x32\n\n DATA = [PICTURE_ASPECT_STATE]\n\n\nclass SCREEN_MODE(Command):\n CMD = 0x18\n GET, SET = True, True\n\n class SCREEN_MODE_STATE(Enum):\n MODE_16_9 = 0x01\n MODE_ZOOM = 0x04\n MODE_4_3 = 0x0B\n MODE_WIDE_ZOOM = 0x31\n\n DATA = [SCREEN_MODE_STATE]\n\n\nclass SCREEN_SIZE(Command):\n CMD = 0x19\n GET, SET = True, False\n\n DATA = [Int('INCHES', range(256))]\n\n\nclass MAGICINFO_SERVER(Command):\n \"\"\"\n MagicInfo Server URL (example: \"http://example.com:80\")\n \"\"\"\n CMD = 0x1C\n SUBCMD = 0x82\n GET, SET = True, True\n\n DATA = [Str('MAGICINFO_SERVER_URL')]\n\n\nclass MDC_CONNECTION(Command):\n CMD = 0x1D\n GET, SET = True, False\n # NOTE: There is no Set command in documentation,\n # but comment states that this parameter is readonly\n # only for RJ45 connection...\n\n class MDC_CONNECTION_TYPE(Enum):\n RS232C = 0x00\n RJ45 = 0x01\n\n DATA = [MDC_CONNECTION_TYPE]\n\n\nclass CONTRAST(Command):\n CMD = 0x24\n GET, SET = True, True\n DATA = [Int('CONTRAST', range(101))]\n\n\nclass BRIGHTNESS(Command):\n CMD = 0x25\n GET, SET = True, True\n DATA = [Int('BRIGHTNESS', range(101))]\n\n\nclass SHARPNESS(Command):\n CMD = 0x26\n GET, SET = True, True\n DATA = [Int('SHARPNESS', range(101))]\n\n\nclass COLOR(Command):\n CMD = 0x27\n GET, SET = True, True\n DATA = [Int('COLOR', range(101))]\n\n\nclass TINT(Command):\n \"\"\"\n Tint value code to be set on 
TV/Monitor.\n R: Tint Value, G: ( 100 - Tint ) Value.\n\n Note: Tint could only be set in 50 Steps (0, 2, 4, 6... 100).\n \"\"\"\n CMD = 0x28\n GET, SET = True, True\n DATA = [Int('TINT', range(101))]\n\n\nclass H_POSITION(Command):\n CMD = 0x31\n GET, SET = False, True\n\n class H_POSITION_MOVE_TO(Enum):\n LEFT = 0x00\n RIGHT = 0x01\n\n DATA = [H_POSITION_MOVE_TO]\n\n\nclass V_POSITION(Command):\n CMD = 0x32\n GET, SET = False, True\n\n class V_POSITION_MOVE_TO(Enum):\n UP = 0x00\n DOWN = 0x01\n\n DATA = [V_POSITION_MOVE_TO]\n\n\nclass AUTO_POWER(Command):\n CMD = 0x33\n GET, SET = True, True\n\n class AUTO_POWER_STATE(Enum):\n OFF = 0x00\n ON = 0x01\n\n DATA = [AUTO_POWER_STATE]\n\n\nclass CLEAR_MENU(Command):\n CMD = 0x34\n SUBCMD = 0x00\n GET, SET = False, True\n\n DATA = []\n\n\nclass IR_STATE(Command):\n \"\"\"\n Enables/disables IR (Infrared) receiving function (Remote Control).\n\n Working Condition:\n * Can operate regardless of whether power is ON/OFF.\n (If DPMS Situation in LFD, it operate Remocon regardless of set value).\n \"\"\"\n CMD = 0x36\n GET, SET = True, True\n\n class IR_STATE(Enum):\n DISABLED = 0x00\n ENABLED = 0x01\n\n DATA = [IR_STATE]\n\n\nclass RGB_CONTRAST(Command):\n CMD = 0x37\n GET, SET = True, True\n DATA = [Int('CONTRAST', range(101))]\n\n\nclass RGB_BRIGHTNESS(Command):\n CMD = 0x38\n GET, SET = True, True\n DATA = [Int('BRIGHTNESS', range(101))]\n\n\nclass AUTO_ADJUSTMENT_ON(Command):\n CMD = 0x3D\n SUBCMD = 0x00\n GET, SET = False, True\n DATA = []\n\n\nclass COLOR_TONE(Command):\n CMD = 0x3E\n GET, SET = True, True\n\n class COLOR_TONE_STATE(Enum):\n COOL_2 = 0x00\n COOL_1 = 0x01\n NORMAL = 0x02\n WARM_1 = 0x03\n WARM_2 = 0x04\n OFF = 0x50\n\n DATA = [COLOR_TONE_STATE]\n\n\nclass COLOR_TEMPERATURE(Command):\n \"\"\"\n Color temperature function.\n\n Unit is hectoKelvin (hK) (x*100 Kelvin) (example: 28 = 2800K).\n\n Supported values - 28, 30, 35, 40... 
160.\n\n For older models: 0-10=(x*100K + 5000K), 253=2800K, 254=3000K, 255=4000K\n \"\"\"\n CMD = 0x3F\n GET, SET = True, True\n\n DATA = [Int('HECTO_KELVIN')]\n\n\nclass STANDBY(Command):\n CMD = 0x4A\n GET, SET = True, True\n\n class STANDBY_STATE(Enum):\n OFF = 0x00\n ON = 0x01\n AUTO = 0x02\n\n DATA = [STANDBY_STATE]\n\n\nclass AUTO_LAMP(Command):\n \"\"\"\n Auto Lamp function (backlight).\n\n Note: When Manual Lamp Control is on,\n Auto Lamp Control will automatically turn off.\n \"\"\"\n CMD = 0x57\n GET, SET = True, True\n\n DATA = [\n Time('MAX_TIME'),\n Int('MAX_LAMP_VALUE', range(101)),\n Time('MIN_TIME'),\n Int('MIN_LAMP_VALUE', range(101)),\n ]\n\n\nclass MANUAL_LAMP(Command):\n \"\"\"\n Manual Lamp function (backlight).\n\n Note: When Auto Lamp Control is on,\n Manual Lamp Control will automatically turn off.\n \"\"\"\n CMD = 0x58\n GET, SET = True, True\n DATA = [Int('LAMP_VALUE', range(101))]\n\n\nclass INVERSE(Command):\n CMD = 0x5A\n GET, SET = True, True\n\n class INVERSE_STATE(Enum):\n OFF = 0x00\n ON = 0x01\n\n DATA = [INVERSE_STATE]\n\n\nclass SAFETY_LOCK(Command):\n CMD = 0x5D\n GET, SET = True, True\n\n class LOCK_STATE(Enum):\n OFF = 0x00\n ON = 0x01\n\n DATA = [LOCK_STATE]\n\n\nclass PANEL_LOCK(Command):\n CMD = 0x5F\n GET, SET = True, True\n\n class LOCK_STATE(Enum):\n OFF = 0x00\n ON = 0x01\n\n DATA = [LOCK_STATE]\n\n\nclass CHANNEL_CHANGE(Command):\n CMD = 0x61\n GET, SET = False, True\n\n class CHANGE_TO(Enum):\n UP = 0x00\n DOWN = 0x01\n\n DATA = [CHANGE_TO]\n\n\nclass VOLUME_CHANGE(Command):\n CMD = 0x62\n GET, SET = False, True\n\n class CHANGE_TO(Enum):\n UP = 0x00\n DOWN = 0x01\n\n DATA = [CHANGE_TO]\n\n\nclass DEVICE_NAME(Command):\n \"\"\"\n It reads the device name which user set up in network.\n Shows the information about entered device name.\n \"\"\"\n CMD = 0x67\n GET, SET = True, False\n DATA = [Str('DEVICE_NAME')]\n\n\nclass OSD(Command):\n CMD = 0x70\n GET, SET = True, True\n\n DATA = [Bool('OSD_ENABLED')]\n\n\nclass ALL_KEYS_LOCK(Command):\n \"\"\"\n Turns both REMOCON and Panel Key Lock function on/off.\n\n Note: Can operate regardless of whether power is on/off.\n \"\"\"\n\n # TODO: REMOCON? 
Remote Control?\n CMD = 0x77\n GET, SET = True, True\n\n class LOCK_STATE(Enum):\n OFF = 0x00\n ON = 0x01\n\n DATA = [LOCK_STATE]\n\n\nclass MODEL_NAME(Command):\n CMD = 0x8A\n GET, SET = True, False\n DATA = [Str('MODEL_NAME')]\n\n\nclass ENERGY_SAVING(Command):\n CMD = 0x92\n GET, SET = True, True\n\n class ENERGY_SAVING_STATE(Enum):\n OFF = 0x00\n LOW = 0x01\n MEDIUM = 0x02\n HIGH = 0x03\n PICTURE_OFF = 0x04\n\n DATA = [ENERGY_SAVING_STATE]\n\n\nclass RESET(Command):\n CMD = 0x9F\n GET, SET = False, True\n\n class RESET_TARGET(Enum):\n PICTURE = 0x00\n SOUND = 0x01\n SETUP = 0x02 # (System reset)\n ALL = 0x03\n SCREEN_DISPLAY = 0x04\n\n DATA = [RESET_TARGET]\n\n\nclass OSD_TYPE(Command):\n CMD = 0xA3\n GET, SET = True, True\n\n class OSD_TYPE(Enum):\n SOURCE = 0x00\n NOT_OPTIMUM_MODE = 0x01\n NO_SIGNAL = 0x02\n MDC = 0x03\n SCHEDULE_CHANNEL = 0x04\n\n DATA = [OSD_TYPE, Bool('OSD_ENABLED')]\n\n @classmethod\n def parse_response_data(cls, data):\n return parse_enum_bitmask(cls.OSD_TYPE, data[0])\n\n\nclass TIMER_15(Command):\n \"\"\"\n Integrated timer function (15 parameters version).\n\n Note: This depends on product and will not work on older versions.\n\n ON_TIME/OFF_TIME - Turn ON/OFF display at specific time of day\n\n ON_ACTIVE/OFF_ACTIVE - If timer is not active, values are ignored,\n so there may be only OFF timer, ON timer, or both.\n\n REPEAT - On which day timer is enabled\n (combined with HOLIDAY_APPLY and MANUAL_WEEKDAY)\n \"\"\"\n CMD = Int('TIMER_ID', range(1, 8))\n _TIMER_ID_CMD = [0xA4, 0xA5, 0xA6, 0xAB, 0xAC, 0xAD, 0xAE]\n GET, SET = True, True\n\n class TIMER_REPEAT(Enum):\n ONCE = 0x00\n EVERYDAY = 0x01\n MON_FRI = 0x02\n MON_SAT = 0x03\n SAT_SUN = 0x04\n MANUAL_WEEKDAY = 0x05\n\n class WEEKDAY(Enum):\n SUN = 0\n MON = 1\n TUE = 2\n WED = 3\n THU = 4\n FRI = 5\n SAT = 6\n # ignore_bit_7 = 7\n\n class HOLIDAY_APPLY(Enum):\n DONT_APPLY_BOTH = 0x00\n APPLY_BOTH = 0x01\n ON_TIMER_ONLY_APPLY = 0x02\n OFF_TIMER_ONLY_APPLY = 0x03\n\n DATA = [\n Time('ON_TIME'),\n Bool('ON_ENABLED'),\n\n Time('OFF_TIME'),\n Bool('OFF_ENABLED'),\n\n EnumField(TIMER_REPEAT, 'ON_REPEAT'),\n # TODO: implement bitmask field\n Bitmask(WEEKDAY, 'ON_MANUAL_WEEKDAY'),\n\n EnumField(TIMER_REPEAT, 'OFF_REPEAT'),\n Bitmask(WEEKDAY, 'OFF_MANUAL_WEEKDAY'),\n\n VOLUME.VOLUME,\n INPUT_SOURCE.INPUT_SOURCE_STATE,\n HOLIDAY_APPLY,\n ]\n\n async def __call__(self, connection, display_id, timer_id, data):\n cmd = self._TIMER_ID_CMD[timer_id - 1]\n data = self.parse_response(\n await connection.send(\n cmd, display_id,\n self.pack_payload_data(data) if data else []\n ),\n )\n return self.parse_response_data(data)\n\n @classmethod\n def get_order(cls):\n return (0xA4, cls.name)\n\n\nclass TIMER_13(TIMER_15):\n \"\"\"\n Integrated timer function (13 parameters version).\n\n Note: This depends on product and will not work on newer versions.\n \"\"\"\n DATA = ([\n f for f in TIMER_15.DATA\n if not f.name.endswith('_REPEAT')\n and not f.name.endswith('_MANUAL_WEEKDAY')\n ] + [\n EnumField(TIMER_15.TIMER_REPEAT, 'REPEAT'),\n Bitmask(TIMER_15.WEEKDAY, 'MANUAL_WEEKDAY'),\n ])\n\n\nclass CLOCK_S(Command):\n \"\"\"\n Current time function (second precision).\n\n Note: This is for models developed after 2013.\n For older models see CLOCK_M function (minute precision).\n \"\"\"\n GET, SET = True, True\n CMD = 0xC5\n\n DATA = [DateTime()]\n\n\nclass CLOCK_M(CLOCK_S):\n \"\"\"\n Current time function (minute precision).\n\n Note: This is for models developed until 2013.\n For newer models see CLOCK_S function (seconds 
precision).\n \"\"\"\n CMD = 0xA7\n\n DATA = [DateTime(seconds=False)]\n\n\nclass VIRTUAL_REMOTE(Command):\n \"\"\"\n This function support that MDC command can work same as remote control.\n\n Note: In a certain model, 0x79 content key works as Home\n and 0x1f Display key works as Info.\n \"\"\"\n CMD = 0xB0\n GET, SET = False, True\n\n class REMOTE_KEY_CODE(Enum):\n KEY_SOURCE = 0x01\n KEY_POWER = 0x02\n KEY_1 = 0x04\n KEY_2 = 0x05\n KEY_3 = 0x06\n KEY_VOLUME_UP = 0x07\n KEY_4 = 0x08\n KEY_5 = 0x09\n KEY_6 = 0x0A\n KEY_VOLUME_DOWN = 0x0B\n KEY_7 = 0x0C\n KEY_8 = 0x0D\n KEY_9 = 0x0E\n KEY_MUTE = 0x0F\n KEY_CHANNEL_DOWN = 0x10\n KEY_0 = 0x11\n KEY_CHANNEL_UP = 0x12\n KEY_GREEN = 0x14\n KEY_YELLOW = 0x15\n KEY_CYAN = 0x16\n KEY_MENU = 0x1A\n KEY_DISPLAY = 0x1F\n KEY_DIGIT = 0x23\n KEY_PIP_TV_VIDEO = 0x24\n KEY_EXIT = 0x2D\n KEY_REW = 0x45\n KEY_STOP = 0x46\n KEY_PLAY = 0x47\n KEY_FF = 0x48\n KEY_PAUSE = 0x4A\n KEY_TOOLS = 0x4B\n KEY_RETURN = 0x58\n KEY_MAGICINFO_LITE = 0x5B\n KEY_CURSOR_UP = 0x60\n KEY_CURSOR_DOWN = 0x61\n KEY_CURSOR_RIGHT = 0x62\n KEY_CURSOR_LEFT = 0x65\n KEY_ENTER = 0x68\n KEY_RED = 0x6C\n KEY_LOCK = 0x77\n KEY_CONTENT = 0x79 # HOME\n DISCRET_POWER_OFF = 0x98\n KEY_3D = 0x9F\n\n DATA = [REMOTE_KEY_CODE]\n\n\nclass NETWORK_STANDBY(Command):\n CMD = 0xB5\n GET, SET = True, True\n\n class NETWORK_STANDBY_STATE(Enum):\n OFF = 0x00\n ON = 0x01\n\n DATA = [NETWORK_STANDBY_STATE]\n\n\nclass DST(Command):\n CMD = 0xB6\n GET, SET = True, True\n\n class DST_STATE(Enum):\n OFF = 0x00\n AUTO = 0x01\n MANUAL = 0x02\n\n class MONTH(Enum):\n JAN = 0x00\n FEB = 0x01\n MAR = 0x02\n APR = 0x03\n MAY = 0x04\n JUN = 0x05\n JUL = 0x06\n AUG = 0x07\n SEP = 0x08\n OCT = 0x09\n NOV = 0x0A\n DEC = 0x0B\n\n class WEEK(Enum):\n WEEK_1 = 0x00\n WEEK_2 = 0x01\n WEEK_3 = 0x02\n WEEK_4 = 0x03\n WEEK_LAST = 0x04\n\n class DAY_OF_WEEK(Enum):\n SUN = 0x00\n MON = 0x01\n TUE = 0x02\n WED = 0x03\n THU = 0x04\n FRI = 0x05\n SAT = 0x06\n\n class OFFSET(Enum):\n PLUS_1_00 = 0x00\n PLUS_2_00 = 0x01\n\n DATA = [\n DST_STATE,\n EnumField(MONTH, 'START_MONTH'),\n EnumField(WEEK, 'START_WEEK'),\n EnumField(DAY_OF_WEEK, 'START_DAY_OF_WEEK'),\n Int('START_HOUR', range(24)),\n Int('START_MINUTE', range(60)),\n EnumField(MONTH, 'END_MONTH'),\n EnumField(WEEK, 'END_WEEK'),\n EnumField(DAY_OF_WEEK, 'END_DAY_OF_WEEK'),\n Int('END_HOUR', range(24)),\n Int('END_MINUTE', range(60)),\n OFFSET,\n ]\n\n RESPONSE_DATA = DATA + [Bool('TUNER_SUPPORT')]\n\n\nclass AUTO_ID_SETTING(Command):\n CMD = 0xB8\n GET, SET = True, True\n\n class AUTO_ID_SETTING_STATE(Enum):\n START = 0x00\n END = 0x01\n\n DATA = [AUTO_ID_SETTING_STATE]\n\n\nclass DISPLAY_ID(Command):\n CMD = 0xB9\n GET, SET = False, True\n\n class DISPLAY_ID_STATE(Enum):\n OFF = 0x00\n ON = 0x01\n\n DATA = [DISPLAY_ID_STATE]\n\n\nclass LAUNCHER_PLAY_VIA(Command):\n CMD = 0xC7\n SUBCMD = 0x81\n GET, SET = True, True\n\n class PLAY_VIA_MODE(Enum):\n MAGIC_INFO = 0x00\n URL_LAUNCHER = 0x01\n MAGIC_IWB = 0x02\n\n DATA = [PLAY_VIA_MODE]\n\n\nclass LAUNCHER_URL_ADDRESS(Command):\n CMD = 0xC7\n SUBCMD = 0x82\n GET, SET = True, True\n DATA = [Str('URL_ADDRESS')]\n\n\nclass PANEL(Command):\n CMD = 0xF9\n GET, SET = True, True\n\n class PANEL_STATE(Enum):\n ON = 0x00\n OFF = 0x01\n\n DATA = [PANEL_STATE]\n\n\nclass STATUS(Command):\n CMD = 0x00\n GET, SET = True, False\n DATA = [\n POWER.POWER_STATE, VOLUME.VOLUME, MUTE.MUTE_STATE,\n INPUT_SOURCE.INPUT_SOURCE_STATE, PICTURE_ASPECT.PICTURE_ASPECT_STATE,\n Int('N_TIME_NF'), Int('F_TIME_NF')\n ]\n\n\nclass VIDEO(Command):\n CMD = 0x04\n 
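# read-only aggregate of the current picture settings (contrast, brightness, sharpness, color, tint, tone, temperature)\n    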
GET, SET = True, False\n DATA = [\n Int('CONTRAST', range(101)), Int('BRIGHTNESS', range(101)),\n Int('SHARPNESS', range(101)), Int('COLOR', range(101)),\n Int('TINT', range(101)), COLOR_TONE.COLOR_TONE_STATE,\n Int('COLOR_TEMPERATURE'), Int('_IGNORE', range(1)),\n ]\n\n\nclass RGB(Command):\n CMD = 0x06\n GET, SET = True, False\n DATA = [\n Int('CONTRAST', range(101)), Int('BRIGHTNESS', range(101)),\n COLOR_TONE.COLOR_TONE_STATE, Int('COLOR_TEMPERATURE'),\n Int('_IGNORE', range(1)),\n Int('RED_GAIN'), Int('GREEN_GAIN'), Int('BLUE_GAIN'),\n ]\n","sub_path":"samsung_mdc/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":17404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"623472226","text":"from ebrains_drive.repos import Repos\nfrom ebrains_drive.files import SeafFile\n\nimport re\n\nclass File(object):\n def __init__(self, client):\n self.client = client\n\n def get_file_by_url(self, file_url):\n \"\"\"Get a single repo associated with specified repo_url\n Example inputs:\n 1) https://drive.ebrains.eu/lib/0fee1620-062d-4643-865b-951de1eee355/file/sample-latest.csv\n 2) https://drive.ebrains.eu/lib/0fee1620-062d-4643-865b-951de1eee355/file/Dir1/data.json\n \"\"\"\n\n regex = r\".*\\/lib\\/(.*)\\/file(\\/.*)\"\n\n matches = re.search(regex, file_url)\n if matches is None:\n raise ValueError(\"Parameter `file_url` does not have expected format!\")\n else:\n repo_id = matches.group(1)\n file_path = matches.group(2)\n\n repo_obj = self.client.repos.get_repo(repo_id)\n file_obj = repo_obj.get_file(file_path)\n return file_obj\n ","sub_path":"ebrains_drive/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"98192905","text":"#### imports\n\nfrom datetime import datetime, timedelta\n # time handling\n\nimport csv\n # csv manipulation\n\nimport random\n # random selection\n\nfrom src.bot_commands.generic import takes_no_args\n # notify user: command takes no arguments\n\n\n#### initialize\n\n\nbooks = []\n\nwith open('assets/books/meta.csv') as meta_file:\n\n csv_reader = csv.reader(meta_file, delimiter=',')\n books_directory = next(csv_reader)[0]\n\n for book_title, book_path, no_lines in csv_reader:\n book = {\n 'title': book_title,\n 'book': open(f'assets/books/{books_directory}/{book_path}'),\n 'no_lines': int(no_lines)\n }\n books.append(book)\n\n\n#### exports\n\n # meta\nmeta = {\n 'name': 'reading',\n 'aliases': [],\n 'description': (\n 'check your reading speed'\n )\n}\n\n # main command\nasync def command(ctx):\n\n # globals\n\n last_sent_message = None\n words_read = 0\n time_taken = timedelta()\n\n last_para_words = None\n start_time = None\n\n from_books = set()\n\n # helper functions\n\n def get_para(no_lines):\n\n # choose random book\n chosen_book = random.choice(books)\n\n # choose random para\n para = ''\n random_start = random.randrange(chosen_book['no_lines']-no_lines)\n for i in range(random_start-1):\n chosen_book['book'].readline()\n for i in range(no_lines):\n para += chosen_book['book'].readline()\n\n # reset file pointer\n chosen_book['book'].seek(0)\n\n return para, chosen_book['title']\n\n # return string containing reading speed\n def reading_speed_str():\n if words_read == 0:\n return 'no data to calculate reading speed'\n\n reading_speed = (words_read / time_taken.total_seconds())*60\n reading_speed = round(reading_speed, 2)\n\n if (reading_speed<200) :\n 
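# slowest bracket: below 200 wpm is reported as a slow reader with 50% comprehension plus improvement tips\n            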
return(f\"{message.author.mention}'s reading speed: **{reading_speed} wpm**\\nComprehension: 50%\\n **Slow reader**\\nYou have many possibilities for improvement\\n Tips to improve:\\n 1. Word–Chunking\\n2.Do Not Reread the Words on the Page\\n3.Use Peripheral Vision\\n4.Work on Improving Your Vocabulary\\n\"+ \n \"you read excerpts from:\\n\"\n + '**' + \"\\n\".join(from_books) + '**' )\n\n elif(reading_speed>200 and reading_speed<300):\n return(f\"{message.author.mention}'s reading speed: **{reading_speed} wpm**\\nComprehension: 60%\\n **Average reader**\\nYou are an oral reader. You may rapidly and significantly progress by suppressing subvocalization.\\n Tips to improve:\\n1. Word–Chunking\\n2.Do Not Reread the Words on the Page\\n3.Use Peripheral Vision\\n4.Work on Improving Your Vocabulary\\n\" + \n \"you read excerpts from:\"\n + '**' + \"\\n\".join(from_books) + '**' )\n elif(reading_speed>300 and reading_speed<450):\n return(f\"{message.author.mention}'s reading speed: **{reading_speed} wpm**\\nComprehension: 80%\\n **Good reader**\\nYou are an auditory reader.\\n Tips to improve:\\n 1. Word–Chunking\\n2.Do Not Reread the Words on the Page\\n3.Use Peripheral Vision\\n4.Work on Improving Your Vocabulary\\n\" + \n \"you read excerpts from:\"\n + '**' + \"\\n\".join(from_books) + '**' )\n else:\n return(f\"{message.author.mention}'s reading speed: **{reading_speed} wpm**\\nComprehension: 85%\\n **Excellent, accomplished reader**\\nYou are a visual reader. Your reading speed is the gem of your CV\\n\" + \n \"you read excerpts from:\"\n + '**' + \"\\n\".join(from_books) + '**' ) \n\n \n \n\n # called when user times out\n async def timeout_caller():\n\n # construct timeout message\n timeout_message = (\n 'late reply..\\n'\n 'aborted'\n )\n\n # append reading speed if at least one para shown\n if last_sent_message != instructions_message:\n timeout_message += '\\n\\n'\n timeout_message += (\n reading_speed_str()\n )\n\n # display timeout message\n await message.channel.send(timeout_message)\n\n\n async def next_para(ctx):\n emoji = ctx['reaction'].emoji\n\n # non locals\n nonlocal timeout\n nonlocal last_sent_message, words_read, time_taken\n nonlocal last_para_words, start_time\n nonlocal from_books\n\n # game aborted\n if emoji == '❌':\n # user has not played\n if last_sent_message == instructions_message:\n await message.channel.send('game aborted by user')\n return\n # construct reading speed message\n aborted_str = (\n 'game stopped by user..\\n'\n '**note**: aborted para not counted in calculation\\n\\n'\n +\n reading_speed_str()\n )\n await message.channel.send(aborted_str)\n return\n\n assert emoji == '✅', 'unexpected emoji received'\n\n\n # if there is data to append\n if last_sent_message != instructions_message:\n time_taken += datetime.now() - start_time\n words_read += last_para_words\n\n # para to send\n para, book = get_para(5)\n from_books.add(book)\n warning = f'\\n\\n({timeout} seconds timeout)'\n\n # display para\n last_sent_message = await message.channel.send(para + warning)\n\n # update current session data\n last_para_words = len(para.split())\n start_time = datetime.now()\n\n # construct session targets\n targets = [{\n 'user': message.author.id,\n 'message': last_sent_message.id,\n 'emoji': '✅'\n }, {\n 'user': message.author.id,\n 'message': last_sent_message.id,\n 'emoji': '❌'\n }]\n # add session\n await bot.add_emoji_session(\n timeout, \n targets, \n next_para, \n timeout_caller\n )\n\n # prompt reactions\n await last_sent_message.add_reaction('✅')\n await 
last_sent_message.add_reaction('❌')\n\n\n # main\n\n action = ctx['action']\n\n # called with too many args\n if action:\n await takes_no_args(ctx, 'reading')\n\n message = ctx['message']\n bot = ctx['bot']\n\n # timeout for prompts\n timeout = 60 * 2\n # 2 minutes\n # timeout for instructions prompt\n instructions_timeout = 30\n # 30 seconds\n\n # instructions to play the game\n instruction = (\n \"**Let's play a game..**\\n\"\n 'You will be given a paragraph. '\n 'Press the ✅ reaction after FULLY reading the paragraph. '\n 'A new paragraph will be loaded for reading. '\n 'Press the ❌ reaction when you want to stop reading.'\n '\\n\\nPress ✅ to begin!\\n'\n f'({instructions_timeout} seconds timeout)'\n )\n\n # display instructions\n instructions_message = await message.reply(instruction)\n last_sent_message = instructions_message\n\n # construct session targets\n targets = [{\n 'user': message.author.id,\n 'message': instructions_message.id,\n 'emoji': '✅'\n }, {\n 'user': message.author.id,\n 'message': instructions_message.id,\n 'emoji': '❌'\n }]\n # add session\n await bot.add_emoji_session(\n instructions_timeout, \n targets, \n next_para, \n timeout_caller\n )\n\n # prompt reactions\n await instructions_message.add_reaction('✅')\n await instructions_message.add_reaction('❌')\n\n\n # help\nasync def help(ctx):\n message = ctx['message']\n bot = ctx['bot']\n\n await message.reply(\n f'**syntax**: {bot.bot_prefix} reading\\n'\n '**use**: to start reading game'\n )\n","sub_path":"src/bot_commands/reading.py","file_name":"reading.py","file_ext":"py","file_size_in_byte":7859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"594654355","text":"#!/usr/bin/env python\n\nimport os\nimport sys\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n__author__ = 'Ryan McGrath '\n__version__ = '3.2.0'\n\npackages = [\n 'twython',\n 'twython.streaming'\n]\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n sys.exit()\n\nsetup(\n name='twython',\n version=__version__,\n install_requires=['requests>=2.1.0', 'requests_oauthlib>=0.4.0'],\n author='Ryan McGrath',\n author_email='ryan@venodesigns.net',\n license=open('LICENSE').read(),\n url='https://github.com/ryanmcgrath/twython/tree/master',\n keywords='twitter search api tweet twython stream',\n description='Actively maintained, pure Python wrapper for the \\\n Twitter API. 
Supports both normal and streaming Twitter APIs',\n long_description=open('README.rst').read() + '\\n\\n' +\n open('HISTORY.rst').read(),\n include_package_data=True,\n packages=packages,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"540120501","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 12 13:25:36 2016\n\n@author: Administrator\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport setDF2\nimport matplotlib.pyplot as plt \n\n#把字符串变化为float类型\ndef toFloat(series):\n l = []\n for i in series:\n l.append(float(i))\n return l \n\ndef 标准正交化(s):\n l = toFloat(s)\n re = []\n for i in l:\n re.append((i - np.mean(l))/np.sqrt(np.var(l)))\n return re \n\ndef set22(l):\n dd = np.array(l);\n dd.shape = 2,2\n df = pd.DataFrame(dd,index = ['无守望先锋','有守望先锋'],columns = ['无lpl','有lpl'])\n return df\n \ndata1 = setDF2.setDF2(\"F:\\\\By\\\\160814\\\\autofull\\\\data1.csv\")\nswxf = setDF2.setDF2(\"F:\\\\By\\\\160814\\\\autofull\\\\守望先锋.csv\")\nlpl = setDF2.setDF2(\"F:\\\\By\\\\160814\\\\autofull\\\\lpl.csv\")\nindex = data1.index\nindex1 = swxf.index\nindex2 = lpl.index\n\nfor i in range(len(data1.columns)):\n data1[data1.columns[i]] = toFloat(data1[data1.columns[i]])\nprint ('傲风和autoll的搜索相关性为:',np.corrcoef(data1.autofull,data1.傲风)[0][1])\n#建立四类数据框;;记录——————data00——————data01——————data10——————data11\nindex0 = []\nindex01 = []\nindex10 = []\nindex11 = []\nfor i in range(len(data1)):\n if((data1.index[i] not in index1) & (data1.index[i] not in index2)):\n index0.append(data1.index[i])\n if((data1.index[i] not in index1) & (data1.index[i] in index2)):\n index01.append(data1.index[i])\n if((data1.index[i] in index1) & (data1.index[i] not in index2)):\n index10.append(data1.index[i])\n if((data1.index[i] in index1) & (data1.index[i] in index2)):\n index11.append(data1.index[i])\ndata00 = pd.DataFrame(data1,index = index0)\ndata01 = pd.DataFrame(data1,index = index01)\ndata10 = pd.DataFrame(data1,index = index10)\ndata11 = pd.DataFrame(data1,index = index11)\n\nl = []\nlvar = []\nl2 = []\n\nprint ('->傲风的平均值',np.mean(toFloat(data00.傲风)),'------','->autofull的平均值:',np.mean(toFloat(data00.autofull)))\nl.append(np.mean(toFloat(data00.傲风)))\nlvar.append(np.sqrt(np.var(toFloat(data00.autofull))))#标准差;浮动范围\nl2.append(np.mean(toFloat(data00.autofull)))\n\nprint ('->傲风的平均值',np.mean(toFloat(data01.傲风)),'------','->autofull的平均值:',np.mean(toFloat(data01.autofull)))\nl.append(np.mean(toFloat(data01.傲风)))\nlvar.append(np.sqrt(np.var(toFloat(data01.autofull))))#标准差;浮动范围\nl2.append(np.mean(toFloat(data01.autofull)))\n\nprint ('->傲风的平均值',np.mean(toFloat(data10.傲风)),'------','->autofull的平均值:',np.mean(toFloat(data10.autofull)))\nl.append(np.mean(toFloat(data10.傲风)))\nlvar.append(np.sqrt(np.var(toFloat(data10.autofull))))#标准差;浮动范围\nl2.append(np.mean(toFloat(data10.autofull)))\n\nprint ('->傲风的平均值',np.mean(toFloat(data11.傲风)),'------','->autofull的平均值:',np.mean(toFloat(data11.autofull)))\nl.append(np.mean(toFloat(data11.傲风)))\nlvar.append(np.sqrt(np.var(toFloat(data11.autofull))))#标准差;浮动范围\nl2.append(np.mean(toFloat(data11.autofull)))\n\nprint ('傲风在四种情况下的平均数值')\ndf1 = set22(l)\nprint (df1)\nprint 
('傲风在四种情况下的标准差')\n\nprint ('autofull','----------------')\ndf2 = set22(l2)\nprint (df2)\nprint ('autofull在四种情况下的标准差')\ndf1_var = set22(lvar)\nprint (df1_var)\n\n\nprint('===============================')\n#再计算每个条件下的均值;;判断平均增长量\n\n\n\n\nswxfdf = data10\nswxfdf['守望先锋PCU'] = swxf.pcu\nprint (swxfdf)\nfor i in range(len(swxfdf.columns)):\n swxfdf[swxfdf.columns[i]] = toFloat(swxfdf[swxfdf.columns[i]])\n#判断两组数据的相关性\nprint ('相关性为:',np.corrcoef(标准正交化(swxfdf.傲风),标准正交化(swxfdf.守望先锋PCU))[0][1])\n\nprint ('相关性为:',np.corrcoef(swxfdf.傲风,swxfdf.守望先锋PCU)[0][1])\n\nprint ('相关性为:',np.corrcoef(标准正交化(swxfdf.autofull),标准正交化(swxfdf.守望先锋PCU))[0][1])\n\nprint ('把最后一个单独守望先锋的比赛按照升序排列')\nprint (swxfdf.sort_values(by = '守望先锋PCU'))\n\n\nplt.plot(range(len(data10)),标准正交化(swxfdf.autofull),'r-',\n range(len(data10)),标准正交化(swxfdf.守望先锋PCU),'y-')\n\n\n\n\n\n\n\n","sub_path":"sdfsfdsa.py","file_name":"sdfsfdsa.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"503938989","text":"from __future__ import print_function, division\nimport os\nimport argparse\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom model.cnn_geometric_model import CNNGeometric\nfrom data.pf_dataset import PFDataset\nfrom data.places_dataset import PlacesDataset\nfrom data.download_datasets import download_PF_willow\nfrom image.normalization import NormalizeImageDict, normalize_image\nfrom util.torch_util import BatchTensorToVars, str_to_bool\nfrom geotnf.transformation import GeometricTnf\nfrom geotnf.point_tnf import *\nimport matplotlib.pyplot as plt\nfrom skimage import io,color\nimport warnings\nimport cv2\nwarnings.filterwarnings('ignore')\n\n# for compatibility with Python 2\ntry:\n input = raw_input\nexcept NameError:\n pass\n\ndef calculateEssentialMatrix(R,T):\n T_matrix = np.array([[0,-T[2,0],T[1,0]],\n [T[2,0],0,-T[0,0]],\n [-T[1,0],T[0,0],0]])\n E = R*T_matrix\n return E\n\ndef drawlines(img1,img2,lines,pts1,pts2):\n ''' img1 - image on which we draw the epilines for the points in img2\n lines - corresponding epilines '''\n row,c = img1.shape\n img1 = cv2.cvtColor(img1,cv2.COLOR_GRAY2BGR)\n img2 = cv2.cvtColor(img2,cv2.COLOR_GRAY2BGR)\n for r,pt1,pt2 in zip(lines,pts1,pts2):\n color = tuple(np.random.randint(0,255,3).tolist())\n x0,y0 = map(int, [0, (-r[2]/r[1])%row ])\n x1,y1 = map(int, [c, (-(r[2]+r[0]*c)/r[1])%row ])\n cv2.line(img1, (x0,y0), (x1,y1), color,1)\n cv2.circle(img1,tuple(pt1),5,color,-1)\n cv2.circle(img2,tuple(pt2),5,color,-1)\n return img1,img2\n\nfeature_detector = cv2.xfeatures2d.SIFT_create()\ndef generate_matched_keypoints(feature_detector,imgA,imgB):\n (kpsA, descsA) = feature_detector.detectAndCompute(imgA, None)\n (kpsB, descsB) = feature_detector.detectAndCompute(imgB, None)\n\n # BFMatcher with default params\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(descsA, descsB, k=2)\n # Match descriptors.\n #good = bf.match(descsA, descsB)\n # Apply ratio test\n good = []\n for m, n in matches:\n if m.distance < 0.8 * n.distance:\n good.append(m)\n final_kpsA = []\n final_kpsB = []\n\n for m in sorted(good, key = lambda x:x.distance):\n kpBIdx = m.trainIdx\n kpAIdx = m.queryIdx\n pntA = kpsA[kpAIdx]\n pntB = kpsB[kpBIdx]\n final_kpsA.append(pntA.pt)\n final_kpsB.append(pntB.pt)\n #return kpsA,kpsB,good\n return final_kpsA, final_kpsB,good\n\nprint('CNNGeometric PF demo script')\nuse_cuda = torch.cuda.is_available()\n# Argument parsing\nparser = 
argparse.ArgumentParser(description='CNNGeometric PyTorch implementation')\n# Paths\nparser.add_argument('--model', type=str, default='trained_models/best_checkpoint_resnet18_adam_pose_mse_loss.pth.tar', help='Trained affine model filename')\n#parser.add_argument('--model-tps', type=str, default='trained_models/best_pascal_checkpoint_adam_tps_grid_loss.pth.tar', help='Trained TPS model filename')\nparser.add_argument('--path', type=str, default='/home/develop/Work/Datasets/', help='Path to PF dataset')\nparser.add_argument('--pairs', type=str, default='/home/develop/Work/Datasets/gardens_pairs_path_samples_sift_RANSAC_12kps.csv', help='Path to PF dataset')\nargs = parser.parse_args()\n\ndataset_path=args.path\ndataset_pairs_file = args.pairs\n# Create model\nprint('Creating CNN model...')\n\nmodel = CNNGeometric(use_cuda=use_cuda,geometric_model='pose',arch = 'resnet18')\n\nprint('Load CNN Weights ...')\ncheckpoint = torch.load(args.model, map_location=lambda storage, loc: storage)\nmodel.load_state_dict(checkpoint['state_dict'])\n\n# Dataset and dataloader\ndataset = PlacesDataset(csv_file=dataset_pairs_file,\n training_image_path=dataset_path,\n transform=NormalizeImageDict(['source_image','target_image']))\ndataloader = DataLoader(dataset, batch_size=1,\n shuffle=True, num_workers=4)\nbatchTensorToVars = BatchTensorToVars(use_cuda=use_cuda)\n\nfor source_im_path,target_im_path,batch in dataloader:\n # get random batch of size 1\n batch = batchTensorToVars(batch)\n\n source_im_size = batch['source_im_size']\n target_im_size = batch['target_im_size']\n\n source_points = batch['source_points']\n target_points = batch['target_points']\n\n model.eval()\n theta_pose,_,_ = model(batch)\n\n #source_im_path = batch['source_im_path']\n #target_im_path = batch['target_im_path']\n\n data_np = theta_pose.data.numpy()\n R = data_np[0,0:9].reshape(3,3)\n T = data_np[0,9:].reshape(3,1)\n E = calculateEssentialMatrix(R,T)\n det = np.linalg.det(E)\n if det == 0:\n print('invalid E')\n continue\n F = E\n img1 = cv2.imread(source_im_path[0],0)\n img2 = cv2.imread(target_im_path[0],0)\n\n kpsA,kpsB ,_ = generate_matched_keypoints(feature_detector,img1,img2)\n pts1 = np.int32(kpsA)[:12]\n pts2 = np.int32(kpsB)[:12]\n\n #pts1, pts2 = source_points.data.numpy()[0].reshape(-1,2), target_points.data.numpy()[0].reshape(-1,2)\n # Find epilines corresponding to points in right image (second image) and\n # drawing its lines on left image\n lines1 = cv2.computeCorrespondEpilines(pts2, 2, F)\n lines1 = lines1.reshape(-1, 3)\n img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)\n # Find epilines corresponding to points in left image (first image) and\n # drawing its lines on right image\n lines2 = cv2.computeCorrespondEpilines(pts1, 1, F)\n lines2 = lines2.reshape(-1, 3)\n img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)\n plt.subplot(121), plt.imshow(img5)\n plt.subplot(122), plt.imshow(img3)\n plt.show()\n #cv2.imshow('left',img5)\n #cv2.imshow('right', img3)\n #cv2.waitKey(300)\n #res = input('Run for another example ([y]/n): ')\n #if res == 'n':\n # break\n","sub_path":"demo_pose.py","file_name":"demo_pose.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"465600569","text":"import os\nimport pandas as pd\nimport numpy as np\n\n\nclass DataAnalizer(list):\n def __init__(self, data_dir_path):\n self.file_list=[]\n self.data_dir_path = data_dir_path\n for file in os.listdir(data_dir_path):\n if 
file.endswith(\".csv\"):\n self.append(pd.read_csv(os.path.join(data_dir_path, file)))\n self.file_list.append(file)\n\n def find(self, name):\n name = name.lower()\n findings = []\n for index in range(len(self)):\n for i in range(self[index].shape[0]):\n status_message = self[index].iloc[i][\"status_message\"]\n try:\n status_message.lower()\n if name in (self[index].iloc[i][\"status_message\"]).lower():\n print((\"%s: %s in: %s row\" % (self.file_list[index][:-4], name, i)))\n except AttributeError:\n pass\n\n def run(self, name):\n self.find(name)\n\n\n","sub_path":"PplOpinion/DataAnalysis/DataAnalysis.py","file_name":"DataAnalysis.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"248221349","text":"from typing import TYPE_CHECKING\nif TYPE_CHECKING:\n\tfrom Platforms.Web.index import WebIndex\n\tfrom Platforms.Discord.main_discord import PhaazebotDiscord\n\nimport json\nimport discord\nfrom aiohttp.web import Response, Request\nfrom Utils.Classes.webrequestcontent import WebRequestContent\nfrom Platforms.Discord.utils import getDiscordServerUsers, getDiscordServerUserAmount\nfrom Platforms.Discord.levels import Calc as LevelCalc\nfrom Platforms.Web.Processing.Api.errors import apiMissingData\nfrom Platforms.Web.Processing.Api.Discord.errors import apiDiscordGuildUnknown\n\nDEFAULT_LIMIT:int = 50\nMAX_LIMIT:int = 100\n\nasync def apiDiscordLevelsGet(cls:\"WebIndex\", WebRequest:Request) -> Response:\n\t\"\"\"\n\t\tDefault url: /api/discord/levels/get\n\t\"\"\"\n\tData:WebRequestContent = WebRequestContent(WebRequest)\n\tawait Data.load()\n\n\t# get required stuff\n\tguild_id:str = Data.getStr(\"guild_id\", \"\", must_be_digit=True)\n\tlimit:int = Data.getInt(\"limit\", DEFAULT_LIMIT, min_x=1, max_x=MAX_LIMIT)\n\toffset:int = Data.getInt(\"offset\", 0, min_x=0)\n\tmember_id:str = Data.getStr(\"member_id\", \"\", must_be_digit=True)\n\tdetailed:bool = Data.getBool(\"detailed\", False) # with names, avatar hash etc.\n\tnickname:bool = Data.getBool(\"nickname\", False) # usernames or nicknames?\n\tname_contains:str = Data.getStr(\"name_contains\", \"\")\n\torder:str = Data.getStr(\"order\", \"\").lower() # order by\n\tedited:int = Data.getInt(\"edited\", 0, min_x=0, max_x=2) # 0 = all, 1 = only nonedited, 2 = only edited\n\n\t# checks\n\tif not guild_id:\n\t\treturn await apiMissingData(cls, WebRequest, msg=\"missing or invalid 'guild_id'\")\n\n\t# format\n\tif order == \"id\":\n\t\torder = \"ORDER BY `id`\"\n\telif order == \"member_id\":\n\t\torder = \"ORDER BY `member_id`\"\n\telif order == \"currency\":\n\t\torder = \"ORDER BY `currency`\"\n\telse:\n\t\torder = \"ORDER BY `rank`, `exp`\"\n\n\tPhaazeDiscord:\"PhaazebotDiscord\" = cls.Web.BASE.Discord\n\tGuild:discord.Guild = discord.utils.get(PhaazeDiscord.guilds, id=int(guild_id))\n\tif not Guild:\n\t\treturn await apiDiscordGuildUnknown(cls, WebRequest)\n\n\t# get levels\n\tres_levels:list = await getDiscordServerUsers(PhaazeDiscord, guild_id=guild_id, member_id=member_id, limit=limit, offset=offset, order_str=order, edited=edited, name_contains=name_contains)\n\n\treturn_list:list = list()\n\n\tfor LevelUser in res_levels:\n\n\t\tlevel_user:dict = LevelUser.toJSON()\n\n\t\tif detailed:\n\t\t\tMem:discord.Member = Guild.get_member(int(LevelUser.member_id))\n\t\t\tlevel_user[\"avatar\"] = Mem.avatar if Mem else None\n\t\t\tlevel_user[\"level\"] = LevelCalc.getLevel(LevelUser.exp)\n\t\t\tif not Mem:\n\t\t\t\tlevel_user[\"username\"] = 
\"[N/A]\"\n\t\t\telse:\n\t\t\t\tif nickname and Mem.nick:\n\t\t\t\t\tlevel_user[\"username\"] = Mem.nick\n\t\t\t\telse:\n\t\t\t\t\tlevel_user[\"username\"] = Mem.name\n\n\t\treturn_list.append(level_user)\n\n\treturn cls.response(\n\t\ttext=json.dumps( dict(\n\t\t\tresult=return_list,\n\t\t\ttotal=await getDiscordServerUserAmount(PhaazeDiscord, guild_id),\n\t\t\tlimit=limit,\n\t\t\toffset=offset,\n\t\t\tdetailed=detailed,\n\t\t\tstatus=200)\n\t\t),\n\t\tcontent_type=\"application/json\",\n\t\tstatus=200\n\t)\n","sub_path":"Platforms/Web/Processing/Api/Discord/Levels/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"563101932","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport csv\n\nTAKE_NEW_DATA = False\nFILENAME = \"data/experiment2_100K_3.csv\"\n\nif TAKE_NEW_DATA:\n import smu\n s = smu.smu()\n \n f = open(FILENAME, \"wb\")\n writer = csv.writer(f)\n\n v_in = np.linspace(0.0, 5, 255)\n i_b = []\n i_e = []\n\n s.set_voltage(2, 0.)\n for v in v_in:\n s.set_voltage(1, v)\n s.autorange(1)\n s.autorange(2)\n i_b.append(s.get_current(1))\n i_e.append(-s.get_current(2))\n\n s.set_voltage(1, 0.)\n\n data = zip(v_in, i_b, i_e)\n writer.writerow([\"V_in(Ch1)\", \"I_b(Ch1)\", \"I_e(Ch2)\"])\n writer.writerows(data)\n f.close()\n\n x = v_in\n y1 = i_b\n y2 = i_e\n\n\nif not TAKE_NEW_DATA:\n with open(FILENAME, 'r') as f:\n reader = csv.reader(f)\n x = []\n y1 = []\n y2 = []\n for i, row in enumerate(reader):\n if i == 0: continue\n x.append(row[0])\n y1.append(row[1])\n y2.append(row[2])\n\n\nif True:\n plt.plot(x, y1, '.', label=\"i_b\")\n plt.legend()\n plt.figure()\n plt.plot(x, y2, '.', label=\"i_e\")\n plt.legend()\n plt.xlabel(\"Voltage\")\n plt.ylabel(\"Current\")\n plt.show()\n\n\n","sub_path":"lab3/scripts/experiment2_data1.py","file_name":"experiment2_data1.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"280129162","text":"\nimport ctypes\nimport logging\nimport os\nimport platform\nfrom lib.file_helper import FileHelper\nfrom lib.file import File\nfrom typing import List\n\n\nclass SystemHelper:\n\n def __init__(self, settings: dict):\n self._settings = settings\n\n def get_files(self) -> List[File]:\n pattern = self._settings['filename_with_date_pattern']\n names = os.listdir(self._settings['path_to_dir'])\n files = FileHelper.get_files_with_created_date(names, pattern)\n return files\n\n def delete_files(self, files: List[File]):\n for file in files:\n if self._settings['delete_files']:\n try:\n logging.info('delete file: %s' % file.name)\n os.remove('%s/%s' % (self._settings['path_to_dir'], file.name))\n except FileExistsError:\n logging.error('file does not exist: %s' % file.name)\n else:\n logging.info('[NO_DELETE_MODE] try to delete file: %s' % file.name)\n\n def get_disk_usage_in_percent(self):\n path = self._settings['path_to_dir']\n if platform.system() == 'Windows':\n available_bytes = ctypes.c_ulonglong(0)\n free_bytes = ctypes.c_ulonglong(0)\n\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(path),\n None,\n ctypes.pointer(available_bytes),\n ctypes.pointer(free_bytes))\n\n result = free_bytes.value / available_bytes.value\n else:\n st = os.statvfs(path)\n result = st.f_bfree/st.f_blocks\n\n return round((1 - result) * 
100)\n","sub_path":"lib/system_helper.py","file_name":"system_helper.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"138798517","text":"# coding=UTF-8\n\n\nfrom django.urls import reverse_lazy\nfrom django.db.models import Q\nfrom django.utils.translation import ugettext as _\nfrom django.views import generic\n\nfrom app.forms.profile.profile_visibilite import ProfileVisibiliteForm\nfrom app.models.personne import Personne\nfrom app.models.personne_enums import PersonneEnums\nfrom app.views.common import LoginRequiredMixin\n\n\nclass ChangeVisibilityView(LoginRequiredMixin, generic.FormView):\n template_name = 'my_home/profile/base.html'\n form_class = ProfileVisibiliteForm\n success_url = reverse_lazy('my_home_profile_edit')\n\n def get(self, request, *args, **kwargs):\n retour = super(ChangeVisibilityView, self).get(request, *args, **kwargs)\n # (!) il ne calcule la vue que si nécessaire !\n # -> forcer à le faire *AVANT* de supprimer le message :\n retour.render()\n if self.request.session.get('message', None):\n del self.request.session['message']\n return retour\n\n def get_object(self):\n p = Personne.objects.filter(\n user__pk__exact=self.request.user.pk\n ).all()\n return p[0] if len(p) else None\n\n def form_valid(self, form):\n def local_bool(idx):\n return True if form.cleaned_data.get(idx, False) else False\n\n p = self.get_object()\n p.niveau_visibilite = form.cleaned_data.get(\n 'niveau_visibilite', PersonneEnums.VISIBILITE_TOUT_LE_MONDE)\n p.age_visible = local_bool('age_visible')\n p.nb_enfants_visible = local_bool('nb_enfants_visible')\n p.langue_visible = local_bool('langue_visible')\n p.langues2_visible = local_bool('langues2_visible')\n p.niveau_etudes_visible = local_bool('niveau_etudes_visible')\n p.programme_visible = local_bool('programme_visible')\n p.employer_current_visible = local_bool('employer_current_visible')\n p.employer_previous_visible = local_bool('employer_previous_visible')\n p.profession_visible = local_bool('profession_visible')\n p.activite_visible = local_bool('activite_visible')\n p.hobbies_visible = local_bool('hobbies_visible')\n p.conduite_visible = local_bool('conduite_visible')\n p.personnalite_visible = local_bool('personnalite_visible')\n p.est_fumeur_visible = local_bool('est_fumeur_visible')\n p.custom_zodiac_sign_visible = local_bool('custom_zodiac_sign_visible')\n p.self_description_visible = local_bool('self_description_visible')\n p.save()\n\n # Exemple de gestion d'erreur, mais ici c'est toujours ok :\n error = None\n if not error:\n # !! RESTE A FAIRE : mettre à jour le mot de passe\n self.request.session['message'] = (\n _(\"Account updated\"),\n _(\"Your visibility configuration has been changed.\"))\n else:\n self.request.session['message'] = (\n _(\"Your visibility configuration has not been changed:\"\n \"
{}\").format(error),\n _(\"Please try again\"))\n\n return super(ChangeVisibilityView, self).form_valid(form)\n#\n","sub_path":"app/views/my_home/profile/change_visibility.py","file_name":"change_visibility.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"584523632","text":"import geopandas as gpd\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\ndef main():\n # Build Data Set\n df_e = df_electiricity()\n gdf_w = df_geometry()\n df_pop = df_population()\n\n # Display rows, columns and width in full\n pd.set_option('display.max_columns', None)\n pd.set_option('display.max_rows', None)\n pd.set_option('display.width', None)\n\n #Merge data\n gdf_w_QP = merge(df_pop, df_e, gdf_w)[0]\n top20_person = merge(df_pop, df_e, gdf_w)[1]\n top20_contry = merge(df_pop, df_e, gdf_w)[2]\n\n #Plotting\n plotting(gdf_w_QP, top20_person, top20_contry)\n\ndef df_electiricity():\n ### dataframe of electiricity of households 2018 ###\n filename1 = 'data/UNdata_Export_Consumption_by_households.csv'\n\n # Call only columns that are necessary\n df_e = pd.read_csv(filename1)[['Country or Area', 'Year', 'Quantity']] # Unit : Kilowatt-hours, million\n df_e = df_e[df_e.Year.eq(2018)]\n\n # Drop off unnecessary columns\n df_e = df_e.drop(columns=['Year'])\n\n # Rename Columns\n df_e.columns = ['country', 'quantity']\n\n df_e[\"country\"] = df_e[\"country\"].str.replace(\"Rep.\", \"Republic\", case=False, regex=False)\n df_e[\"country\"] = df_e[\"country\"].str.replace(\"Is.\", \"Islands\", case=False,regex=False)\n df_e[\"country\"] = df_e[\"country\"].str.replace(\"Dem.\", \"Democratic\", case=False, regex=False)\n df_e[\"country\"] = df_e[\"country\"].str.replace(\"St.\", \"Saint\", case=False, regex=False)\n df_e[\"country\"] = df_e[\"country\"].str.replace(\"Ppl's\", \"People's\", case=False, regex=False)\n df_e[\"country\"] = df_e[\"country\"].str.replace(\"Fed.\", \"Federal\", case=False, regex=False)\n\n df_e.replace(to_replace='Bolivia (Plur. State of)', value='Bolivia', inplace=True)\n df_e.replace(to_replace='Cabo Verde', value='Cape Verde', inplace=True)\n df_e.replace(to_replace='Czechia', value='Czech Republic', inplace=True)\n df_e.replace(to_replace='Micronesia (Fed. States of)', value='Micronesia, Federated States of', inplace=True)\n df_e.replace(to_replace=\"C??te d'Ivoire\", value=\"Cote d'Ivoire\", inplace=True)\n df_e.replace(to_replace=\"China, Hong Kong SAR\", value=\"Hong Kong\", inplace=True)\n df_e.replace(to_replace=\"Faeroe Islands\", value=\"Faroe Islands\", inplace=True)\n df_e.replace(to_replace=\"China, Macao SAR\", value=\"Macau\", inplace=True)\n df_e.replace(to_replace=\"Korea, DemocraticPeople's.Republic\", value=\"Korea, Democratic People's Republic of\", inplace=True)\n df_e.replace(to_replace=\"Russian Federation\", value=\"Russia\", inplace=True)\n df_e.replace(to_replace='Saint Kitts-Nevis', value='Saint Kitts and Nevis', inplace=True)\n df_e.replace(to_replace='State of Palestine', value='Palestine', inplace=True)\n df_e.replace(to_replace='Tanzania', value='United Republic of Tanzania', inplace=True)\n df_e.replace(to_replace='Venezuela (Bolivar. 
Republic)', value='Venezuela', inplace=True)\n df_e.replace(to_replace='Micronesia (Federal States of)', value='Micronesia, Federated States of', inplace=True)\n\n return df_e\n\n\ndef df_geometry():\n ### geopanda dataframe ###\n shapefile = 'data\\TM_WORLD_BORDERS-0.3/TM_WORLD_BORDERS-0.3.shp'\n\n # Read shapefile using Geopandas\n gdf = gpd.read_file(shapefile)[['NAME', 'ISO3', 'geometry']]\n\n # Rename columns.\n gdf.columns = ['country', 'country_code', 'geometry']\n\n gdf.replace(to_replace='Burma', value='Myanmar', inplace=True)\n gdf.replace(to_replace='The former Yugoslav Republic of Macedonia', value='North Macedonia', inplace=True)\n gdf.replace(to_replace='Libyan Arab Jamahiriya', value='Libya', inplace=True)\n gdf.replace(to_replace='Swaziland', value='Eswatini', inplace=True)\n\n drop_index = [108,169,212,216,218,219,225,226,229,232,233,234]\n for i in range(140,151):\n drop_index.append(int(i))\n for i in range(239,246):\n drop_index.append(int(i))\n drop_index.sort()\n\n gdf.drop(gdf.index[gdf['country'] == 'Tokelau'], inplace=True)\n gdf = gdf.drop(drop_index)\n\n # These countries are dropped due to non-existing data (elec consumpsion or population)\n # Martinique\n # Mayotte\n # Åland Islands\n # Norfolk Island\n # Cocos (Keeling) Islands\n # Antarctica\n # Bouvet Island\n # French Southern and Antarctic Lands\n # Heard Island and McDonald Islands\n # British Indian Ocean Territory\n # Christmas Island\n # United States Minor Outlying Islands\n # Reunion\n # Tokelau\n # Saint Vincent and the Grenadines\n # United States Virgin Islands\n # Wallis and Futuna Islands, Samoa\n # Svalbard\n # Saint Martin\n # Saint Barthelemy\n # Guernsey\n # Jersey\n # South Georgia South Sandwich Islands\n # Taiwan'''\n return gdf\n\ndef df_population():\n ### dataframe widipedia world population ###\n df_pop = pd.read_html('https://en.wikipedia.org/wiki/List_of_countries_by_population_(United_Nations)')[0] # [0] : list -> dataframe\n\n # Drop off unnecessary columns\n df_pop = df_pop.drop(columns=['UN continentalregion[4]',\n 'UN statisticalsubregion[4]',\n 'Population(1 July 2019)',\n 'Change'])\n # Rename the columns\n df_pop.columns = ['country', 'population']\n\n # delete remark such as [a]\n for index, row in df_pop.iterrows():\n if '[' and '(' in row['country']:\n int_index = row['country'].index('(')\n old_str = str(row['country'])\n new_str = str(row['country'][:(int_index-1)])\n df_pop.replace(to_replace=old_str, value=new_str, inplace=True)\n elif '[' in row['country']:\n old_str = str(row['country'])\n new_str = str(row['country'][:-3])\n df_pop.replace(to_replace=old_str, value=new_str, inplace=True)\n elif '(' in row['country']:\n int_index = row['country'].index('(')\n old_str = str(row['country'])\n new_str = str(row['country'][:(int_index-1)])\n df_pop.replace(to_replace=old_str, value=new_str, inplace=True)\n\n df_pop.replace(to_replace='Brunei', value='Brunei Darussalam', inplace=True)\n df_pop.replace(to_replace='DR Congo', value='Democratic Republic of the Congo', inplace=True)\n df_pop.replace(to_replace='Falkland Islands', value='Falkland Islands (Malvinas)', inplace=True)\n df_pop.replace(to_replace='F.S. 
Micronesia', value='Micronesia, Federated States of', inplace=True)\n df_pop.replace(to_replace='Iran', value='Iran (Islamic Republic of)', inplace=True)\n df_pop.replace(to_replace='Ivory Coast', value=\"Cote d'Ivoire\", inplace=True)\n df_pop.replace(to_replace='North Korea', value=\"Korea, Democratic People's Republic of\", inplace=True)\n df_pop.replace(to_replace='South Korea', value='Korea, Republic of', inplace=True)\n df_pop.replace(to_replace=\"Laos\", value=\"Lao People's Democratic Republic\", inplace=True)\n df_pop.replace(to_replace='State of Palestine', value='Palestine', inplace=True)\n df_pop.replace(to_replace='Moldova', value='Republic of Moldova', inplace=True)\n df_pop.replace(to_replace='Syria', value='Syrian Arab Republic', inplace=True)\n df_pop.replace(to_replace='São Tomé and Príncipe', value='Sao Tome and Principe', inplace=True)\n df_pop.replace(to_replace='East Timor', value='Timor-Leste', inplace=True)\n df_pop.replace(to_replace='Vietnam', value='Viet Nam', inplace=True)\n df_pop.replace(to_replace='Tanzania', value='United Republic of Tanzania', inplace=True)\n\n return df_pop\n\ndef merge(df_pop,df_e,gdf_w):\n gdf_join_pop = gdf_w.merge(df_pop,\n on='country',\n how='left')\n\n gdf_join_e_pop = gdf_join_pop.merge(df_e,\n on='country',\n how='left')\n\n # Insert Population data of Vatican City into gdf_join_e_pop\n gdf_join_e_pop.loc[gdf_join_e_pop.country == 'Holy See (Vatican City)', \"population\"] = 801\n\n gdf_join_e_pop['Q/P'] = gdf_join_e_pop.quantity/gdf_join_e_pop.population*1000000\n\n gdf_join_e_pop.sort_values(by=['Q/P'], inplace=True,ascending=False)\n gdf_join_e_pop_20 = gdf_join_e_pop.head(20)\n\n gdf_join_e_pop.sort_values(by=['quantity'], inplace=True, ascending=False)\n gdf_join_e_pop_20_q = gdf_join_e_pop.head(20)\n\n gdf_join_e_pop.sort_values(by=['quantity'], inplace=True,ascending=False)\n\n\n\n return gdf_join_e_pop, gdf_join_e_pop_20, gdf_join_e_pop_20_q\n\ndef plotting(gdf_w_QP, top20_person, top20_contry): #(gdf_join_e_pop, gdf_join_e_pop_20, gdf_join_e_pop_20_q):\n fig1, ax1 = plt.subplots()\n fig2, ax2 = plt.subplots()\n fig3, ax3 = plt.subplots()\n fig4, ax4 = plt.subplots()\n fig1.set_size_inches(8, 5)\n fig2.set_size_inches(8, 6)\n fig3.set_size_inches(8, 5)\n fig4.set_size_inches(8, 6)\n\n ax1.set_title('Households Electricity Consumption per Capita', fontsize=14)\n gdf_w_QP.plot(column='Q/P',\n ax=ax1,\n legend=True,\n cmap='gist_rainbow',\n legend_kwds={'label': \"Kilowatt-hours\",\n 'orientation': \"horizontal\"})\n\n ax3.set_title('Households Electricity Consumption per Country', fontsize=14)\n gdf_w_QP.plot(column='quantity',\n ax=ax3,\n legend=True,\n cmap='gist_rainbow',\n legend_kwds={'label': \"Kilowatt-hours\",\n 'orientation': \"horizontal\"})\n\n ax2.bar(top20_person['country'], top20_person['Q/P'], color='blue')\n ax2.set_xlabel('Country', fontsize=14)\n ax2.set_ylabel('Kilowatt-hours', fontsize=14)\n ax2.set_xticks(top20_person['country']) #This line of code prevents from error. 'set_xticks' has to come before set_'set_xticklabels'\n ax2.set_xticklabels(labels =top20_person['country'].tolist(), rotation = 80 ) #labels take list object\n ax2.set_title('Top 20 countries in Households Electiricity consumption per capita', fontsize=14)\n\n ax4.bar(top20_contry['country'], top20_contry['quantity'], color='blue')\n ax4.set_xlabel('Country', fontsize=14)\n ax4.set_ylabel('Kilowatt-hours', fontsize=14)\n ax4.set_xticks(top20_contry['country']) #This line of code prevents from error. 
'set_xticks' has to come before set_'set_xticklabels'\n ax4.set_xticklabels(labels =top20_contry['country'].tolist(), rotation = 80 ) #labels take list object\n ax4.set_title('Top 20 countries in Households Electiricity consumption', fontsize=14)\n\n plt.tight_layout() #This line helps to show long names of countries(x axis)\n plt.show()\n \nif __name__ == '__main__':\n main()","sub_path":"ElectricityConsumptionPerCapita.py","file_name":"ElectricityConsumptionPerCapita.py","file_ext":"py","file_size_in_byte":11028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"74598024","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport ralph.lib.mixins.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('operations', '0008_auto_20170331_0952'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='operation',\n name='ticket_id',\n field=ralph.lib.mixins.fields.TicketIdField(null=True, max_length=200, verbose_name='ticket id', unique=True, blank=True, help_text='External system ticket identifier'),\n ),\n ]\n","sub_path":"src/ralph/operations/migrations/0009_auto_20170403_1112.py","file_name":"0009_auto_20170403_1112.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"233780181","text":"\"\"\" \r\n@author: lileilei\r\n@file: selsct.py \r\n@time: 2018/4/23 9:14 \r\n\"\"\"\r\nimport time,gevent\r\nfrom gevent import select\r\nstart=time.time()\r\ntic = lambda: 'at %1.1f seconds' % (time.time() - start)\r\ndef gr1():\r\n print('Started Polling: %s' % tic())\r\n select.select([], [], [], 1)#阻塞\r\n print('Ended Polling: %s' % tic())\r\ndef gr2():\r\n print('Started Polling: %s' % tic())\r\n select.select([], [], [], 1)#阻塞\r\n print('Ended Polling: %s' % tic())\r\ndef gr3():\r\n print(\"Hey lets do some stuff while the greenlets poll, %s\" % tic())\r\n gevent.sleep(1)#阻塞\r\ngevent.joinall([\r\n gevent.spawn(gr1),\r\n gevent.spawn(gr2),\r\n gevent.spawn(gr3),\r\n])","sub_path":"study/selsct.py","file_name":"selsct.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"512244989","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pygame as pg\r\n\r\npg.init()\r\n\r\nwin = pg.display.set_mode((2000, 2000))\r\npg.display.set_caption(\"Yaboi\")\r\n\r\nforce1 = 1000\r\nvelx1 = 0\r\nvely1 = 0\r\nwidth1 = 30\r\nheight1 = 30\r\ny1 = 235\r\nx1 = 200\r\nmass1 = width1 * height1\r\naccel1 = force1 / mass1\r\n\r\nforce2 = 1000\r\nvelx2 = 0\r\nvely2 = 0\r\nwidth2 = 30\r\nheight2 = 30\r\ny2 = 235\r\nx2 = 235\r\nmass2 = width2 * height2\r\naccel2 = force2 / mass2\r\n\r\nrun = True\r\nwhile run:\r\n pg.time.delay(50)\r\n\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n run = False\r\n\r\n keys = pg.key.get_pressed()\r\n if keys[pg.K_LEFT]:\r\n velx1 -= accel1\r\n if keys[pg.K_RIGHT]:\r\n velx1 += accel1\r\n if keys[pg.K_UP]:\r\n vely1 -= accel1\r\n if keys[pg.K_DOWN]:\r\n vely1 += accel1\r\n \r\n if x1 <= 0 or x1 >= 500:\r\n velx1 = -1 * velx1\r\n \r\n if y1 <= 0 or y1 >= 500:\r\n vely1 = -vely1\r\n\r\n x1 += velx1\r\n y1 += vely1\r\n\r\n keys = pg.key.get_pressed()\r\n if keys[pg.K_a]:\r\n velx2 -= accel2\r\n if keys[pg.K_d]:\r\n velx2 += accel2\r\n if keys[pg.K_w]:\r\n vely2 -= accel2\r\n if keys[pg.K_s]:\r\n vely2 += accel2\r\n \r\n if x2 
<= 0 or x2 >= 500:\r\n velx2 = -1 * velx2\r\n\r\n if y2 <= 0 or y2 >= 500:\r\n vely2 = -1 * vely2\r\n\r\n x2 += velx2\r\n y2 += vely2\r\n\r\n\r\n win.fill((0, 0, 0))\r\n pg.draw.rect(win, (255, 0, 0), (x1, y1, width1, height1))\r\n pg.draw.rect(win, (255, 0, 0), (x2, y2, width2, height2))\r\n pg.display.update()\r\npg.quit()\r\n","sub_path":"accel.py","file_name":"accel.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"356230704","text":"# Copyright 2019 AstroLab Software\n# Author: Julien Peloton\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom pyspark.sql.functions import pandas_udf, PandasUDFType, col\nfrom pyspark.sql.types import BooleanType, StringType\n\nimport pandas as pd\nimport numpy as np\n\nfrom fink_broker.classification import cross_match_alerts_raw\nfrom fink_broker.classification import cross_match_alerts_raw_slow\n\nfrom typing import Any\n\n# Declare here the filters that will be applied in the\n# level one (raw -> science database)\nfilter_levelone_names = [\"qualitycuts\"]\n\n# Declare here the processors that will be applied in the\n# level one (stream -> raw database)\nprocessor_levelone_names = [\"cross_match_alerts_per_batch\"]\n\n@pandas_udf(BooleanType(), PandasUDFType.SCALAR)\ndef qualitycuts(nbad: Any, rb: Any, magdiff: Any) -> pd.Series:\n \"\"\" Apply simple quality cuts to the alert stream.\n\n The user will edit this function (or create a new filtering function)\n with his/her needs the following way:\n\n 1) Set the input entry column (i.e. replace nbad,\n rb, etc. by what you need). These must be `candidate` entries.\n 2) Update the logic inside the function. The idea is to\n apply conditions based on the values of the columns.\n 3) Return a column whose entry is false if the alert has to be discarded,\n and true otherwise.\n\n Parameters\n ----------\n nbad: Spark DataFrame Column\n Column containing the nbad values\n rb: Spark DataFrame Column\n Column containing the rb values\n magdiff: Spark DataFrame Column\n Column containing the magdiff values\n\n Returns\n ----------\n out: pandas.Series of bool\n Return a Pandas DataFrame with the appropriate flag: false for bad alert,\n and true for good alert.\n\n \"\"\"\n mask = nbad.values == 0\n mask *= rb.values >= 0.55\n mask *= abs(magdiff.values) <= 0.1\n\n return pd.Series(mask)\n\n@pandas_udf(StringType(), PandasUDFType.SCALAR)\ndef cross_match_alerts_per_batch(objectId: Any, ra: Any, dec: Any) -> pd.Series:\n \"\"\" Query the CDSXmatch service to find identified objects\n in alerts. The catalog queried is the SIMBAD bibliographical database.\n We can also use the 10,000+ VizieR tables if needed :-)\n\n I/O specifically designed for use as `pandas_udf` in `select` or\n `withColumn` dataframe methods\n\n The user will create a new processing function with his/her needs the\n following way:\n\n 1) Define the input entry column. 
These must be `candidate` entries.\n 2) Update the logic inside the function. The idea is to\n apply conditions based on the values of the columns.\n 3) Return a column with added value after processing\n\n Parameters\n ----------\n objectId: list of str or Spark DataFrame Column of str\n List containing object ids (custom)\n ra: list of float or Spark DataFrame Column of float\n List containing object ra coordinates\n dec: list of float or Spark DataFrame Column of float\n List containing object dec coordinates\n\n Returns\n ----------\n out: pandas.Series of string\n Return a Pandas DataFrame with the type of object found in Simbad.\n If the object is not found in Simbad, the type is\n marked as Unknown. In the case several objects match\n the centroid of the alert, only the closest is returned.\n If the request Failed (no match at all), return Column of Fail.\n\n \"\"\"\n # discriminate which service to use using the number of alerts\n # Issue 265: cross_match_alerts_raw_slow is buggy and need to be fixed.\n if len(ra) <= 1:\n matches = cross_match_alerts_raw_slow(\n objectId.values, ra.values, dec.values)\n else:\n matches = cross_match_alerts_raw(\n objectId.values, ra.values, dec.values)\n\n # For regular alerts, the number of matches is always non-zero as\n # alerts with no counterpart will be labeled as Unknown.\n # If cross_match_alerts_raw returns a zero-length list of matches, it is\n # a sign of a problem (logged).\n if len(matches) > 0:\n # (objectId, ra, dec, name, type)\n # return only the type.\n names = np.transpose(matches)[-1]\n else:\n # Tag as Fail if the request failed.\n names = [\"Fail\"] * len(objectId)\n return pd.Series(names)\n","sub_path":"userfilters/levelone.py","file_name":"levelone.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"145390937","text":"from django.urls import path\r\n\r\nfrom roomapp.views import RoomCreateView, RoomUpdateView, RoomIndexView\r\n\r\napp_name = \"roomapp\"\r\n\r\nurlpatterns = [\r\n path('create/', RoomCreateView.as_view(), name='create'),\r\n path('update/', RoomUpdateView.as_view(), name='update'),\r\n path('list/', RoomIndexView.as_view(), name='list'),\r\n]","sub_path":"roomapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"648768422","text":"# Complete the selection_sort() function below in class with your instructor\ndef selection_sort(arr):\n # loop through n-1 elements\n for i in range(0, len(arr) - 1):\n cur_index = i\n smallest_index = cur_index\n # TO-DO: find next smallest element\n # (hint, can do in 3 loc)\n for j in range(i+1, len(arr)):\n if arr[j] < arr[smallest_index]:\n smallest_index = j\n\n # TO-DO: swap\n arr[smallest_index], arr[cur_index] = arr[cur_index], arr[smallest_index]\n\n return arr\n\n\n# print(selection_sort([1, 5, 8, 4, 2, 9, 6, 0, 3, 7]))\n\n# TO-DO: implement the Insertion Sort function below\n\n\ndef insertion_sort(arr):\n for i in range(0, len(arr)):\n cur_val = arr[i]\n pos = i\n while pos > 0 and cur_val < arr[pos-1]:\n print(arr)\n arr[pos], arr[pos-1] = arr[pos-1], arr[pos]\n pos -= 1\n\n arr[pos] = cur_val\n return arr\n\n\n# print(insertion_sort([1, 5, 8, 4, 2, 9, 6, 0, 3, 7]))\n\n# STRETCH: implement the Bubble Sort function below\n\n\ndef bubble_sort(arr):\n x = len(arr)\n while x > 0:\n for i in range(1, x):\n print(arr)\n if arr[i-1] > arr[i]:\n arr[i-1], 
arr[i] = arr[i], arr[i-1]\n x -= 1\n return arr\n\n\n# arr1 = [1, 5, 8, 4, 2, 9, 6, 0, 3, 7]\n# print(arr1)\n# print(\"bubble sort\", bubble_sort(arr1))\n\n# STRETCH: implement the Count Sort function below\n\n\ndef count_sort(arr, maximum=-1):\n\n return arr\n","sub_path":"project/iterative_sorting.py","file_name":"iterative_sorting.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"509107201","text":"# Copyright 2016 Red Hat, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport mock\n\nfrom tripleo_common.actions import plan\nfrom tripleo_common import exception\nfrom tripleo_common.tests import base\n\nMAPPING_YAML_CONTENTS = \"\"\"root_template: /path/to/overcloud.yaml\nroot_environment: /path/to/environment.yaml\ntopics:\n - title: Fake Single Environment Group Configuration\n description:\n environment_groups:\n - title:\n description: Random fake string of text\n environments:\n - file: /path/to/network-isolation.json\n title: Default Configuration\n description:\n\n - title: Fake Multiple Environment Group Configuration\n description:\n environment_groups:\n - title: Random Fake 1\n description: Random fake string of text\n environments:\n - file: /path/to/ceph-storage-env.yaml\n title: Fake1\n description: Random fake string of text\n\n - title: Random Fake 2\n description:\n environments:\n - file: /path/to/poc-custom-env.yaml\n title: Fake2\n description:\n\"\"\"\n\n\nclass CreateContainerActionTest(base.TestCase):\n\n def setUp(self):\n super(CreateContainerActionTest, self).setUp()\n self.container_name = 'test-container'\n self.expected_list = ['', [{'name': 'test1'}, {'name': 'test2'}]]\n\n @mock.patch('tripleo_common.actions.base.TripleOAction._get_object_client')\n def test_run(self, get_obj_client_mock):\n\n # Setup\n swift = mock.MagicMock()\n swift.get_account.return_value = self.expected_list\n get_obj_client_mock.return_value = swift\n\n # Test\n action = plan.CreateContainerAction(self.container_name)\n action.run()\n\n # Verify\n swift.put_container.assert_called_once_with(\n self.container_name,\n headers=plan.default_container_headers\n )\n\n @mock.patch('tripleo_common.actions.base.TripleOAction._get_object_client')\n def test_run_container_exists(self, get_obj_client_mock):\n\n # Setup\n swift = mock.MagicMock()\n swift.get_account.return_value = [\n '', [{'name': 'test-container'}, {'name': 'test2'}]]\n get_obj_client_mock.return_value = swift\n\n # Test\n action = plan.CreateContainerAction(self.container_name)\n\n self.assertRaises(exception.ContainerAlreadyExistsError, action.run)\n\n\nclass CreatePlanActionTest(base.TestCase):\n\n def setUp(self):\n super(CreatePlanActionTest, self).setUp()\n self.container_name = 'test-container'\n self.capabilities_name = 'capabilities-map.yaml'\n\n @mock.patch('tripleo_common.actions.base.TripleOAction._get_object_client')\n @mock.patch(\n 
'tripleo_common.actions.base.TripleOAction._get_workflow_client')\n def test_run(self, get_workflow_client_mock, get_obj_client_mock):\n\n # setup swift\n swift = mock.MagicMock()\n swift.get_object.return_value = ({}, MAPPING_YAML_CONTENTS)\n get_obj_client_mock.return_value = swift\n\n # setup mistral\n mistral = mock.MagicMock()\n get_workflow_client_mock.return_value = mistral\n\n # Test\n action = plan.CreatePlanAction(self.container_name)\n action.run()\n\n # verify\n swift.get_object.assert_called_once_with(\n self.container_name,\n self.capabilities_name\n )\n\n mistral.environments.create.assert_called_once_with(\n name='test-container',\n variables=('{\"environments\":'\n ' [{\"path\": \"/path/to/environment.yaml\"}], '\n '\"template\": \"/path/to/overcloud.yaml\"}')\n )\n","sub_path":"tripleo_common/tests/actions/test_plan.py","file_name":"test_plan.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"626118619","text":"#!/usr/bin/python\n\nfrom cortex.app import CortexFlask\nfrom datetime import timedelta\n\n################################################################################\n#### Default config options\n\n## Debug mode. This engages the web-based debug mode\nDEBUG = False\n\n## Enable the debug toolbar. DO NOT DO THIS ON A PRODUCTION SYSTEM. EVER. It exposes SECRET_KEY.\nDEBUG_TOOLBAR = False\n\n## Session signing key\n# Key used to sign/encrypt session data stored in cookies.\nSECRET_KEY = ''\n\n## File logging\nFILE_LOG=True\nLOG_FILE='cortex.log'\nLOG_DIR='/tmp'\nLOG_FILE_MAX_SIZE=1 * 1024 * 1024\nLOG_FILE_MAX_FILES=10\n\nEMAIL_ALERTS=False\nADMINS=['root']\nSMTP_SERVER='localhost'\nEMAIL_FROM='root'\nEMAIL_SUBJECT='Cortex Runtime Error'\nEMAIL_DOMAIN='localdomain'\n\n## Redis\nREDIS_HOST='localhost'\nREDIS_PORT=6379\n\n## MySQL\nMYSQL_HOST='localhost'\nMYSQL_USER='cortex'\nMYSQL_PW=''\nMYSQL_DB='cortex'\nMYSQL_PORT=3306\n\n## CMDB Integration\nCMDB_URL_FORMAT=\"http://localhost/cmdb/%s\"\n\n## Cortex internal version number\nVERSION_MAJOR='1.0'\nVERSION_MINOR='2016022206'\n\n## Flask defaults (changed to what we prefer)\nSESSION_COOKIE_SECURE = False\nSESSION_COOKIE_HTTPONLY = False\nPREFERRED_URL_SCHEME = 'http'\nPERMANENT_SESSION_LIFETIME = timedelta(days=7)\n\n## LDAP AUTH\nLDAP_URI = 'ldaps://localhost.localdomain'\nLDAP_SEARCH_BASE = ''\nLDAP_USER_ATTRIBUTE = 'sAMAccountName'\nLDAP_ANON_BIND = True\nLDAP_BIND_USER = ''\nLDAP_BIND_PW = ''\n\nLDAP_ADMIN_GROUP = \"CN=jfEstMembers,OU=resource,OU=jf,OU=jf,OU=pk,OU=User,DC=soton,DC=ac,DC=uk\"\n\n# Number of seconds, into the future, to cache user's group memberships for.\n# You probably don't want to change this.\nLDAP_GROUPS_CACHE_EXPIRE = '900'\n\n# Infoblox server\nINFOBLOX_HOST = \"\" \nINFOBLOX_USER = \"\"\nINFOBLOX_PASS = \"\"\n\n# ServiceNow instance\nSN_HOST = ''\nSN_USER = ''\nSN_PASS = ''\nCMDB_URL_FORMAT = 'https://myinstance.service-now.com/nav_to.do?uri=cmdb_ci_server.do?sys_id=%s'\nCMDB_CACHED_CLASSES={'cmdb_ci_server': 'Server'}\n\n# VMware configuration\nVMWARE={}\n\n# Neocortex is a daemon \nNEOCORTEX_KEY='changeme'\nWORKFLOWS_DIR='/data/cortex/workflows/'\n\n# Other\nENVIRONMENTS = []\n\n## API pre-shared keys\n# used by puppet master to get ENC data\nENC_API_AUTH_TOKEN = 'changeme'\n# used by all other API calls\nCORTEX_API_AUTH_TOKEN = 'changeme'\n\n# PuppetDB\nPUPPETDB_HOST=''\nPUPPETDB_PORT=8081\nPUPPETDB_SSL_VERIFY=False\nPUPPETDB_SSL_CERT=''\nPUPPETDB_SSL_KEY=''\n\n# Puppet Autosign 
server\nPUPPET_AUTOSIGN_URL='https://yourserver.tld/getcert'\nPUPPET_AUTOSIGN_KEY='changeme'\nPUPPET_AUTOSIGN_VERIFY=False\n\n# Red Hat Satellite Keys\nSATELLITE_KEYS = {\n\t'el7s' : {\n\t\t'development': 'changeme'\n\t}\n}\n\n################################################################################\n\n\n# initalise cortex\napp = CortexFlask(__name__)\n\n# load workflow modules\napp.load_workflows()\n\n# load cortex modules\n## nb. this is done so that decorators are processed, otherwise there is no need\n## to import them during application initalisation at all. \nimport cortex.lib.user\nimport cortex.errors\nimport cortex.admin\nimport cortex.views\nimport cortex.vmware\nimport cortex.systems\nimport cortex.puppet\nimport cortex.api\nimport cortex.register\nimport cortex.user\n\n\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"315179554","text":"import sqlalchemy\nimport sqlalchemy.ext.declarative\nimport sqlalchemy.orm\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import Column\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy import String\n\nfrom lib.config_loader import config as conf\n\nengine = sqlalchemy.create_engine(\n conf.db.get('uri'),\n encoding=conf.db.get('encoding'),\n echo=conf.db.get('echo'))\nBase = sqlalchemy.ext.declarative.declarative_base()\n\n\nclass SubjectGroup(Base):\n __tablename__ = 'subjectGroups'\n Id = Column(\n sqlalchemy.Integer, primary_key=True, nullable=False, unique=True)\n Code = Column(String(50))\n CodeEx = Column(String(50))\n HideStd = Column(sqlalchemy.BOOLEAN)\n Name = Column(String(100))\n Order = Column(String(100))\n PictureUrl = Column(String(200))\n Type = Column(sqlalchemy.Integer)\n Year = Column(sqlalchemy.Integer)\n\n report = relationship('Report', backref=\"subjectGroups\")\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass Report(Base):\n __tablename__ = 'reports'\n TaskID = Column(\n sqlalchemy.Integer, primary_key=True, nullable=False, unique=True)\n DisplayDate = Column(sqlalchemy.String(50))\n GroupID = Column(\n sqlalchemy.Integer,\n ForeignKey('subjectGroups.Id', onupdate='CASCADE', ondelete='CASCADE'))\n GroupName = Column(String(50))\n IsResubmit = Column(sqlalchemy.BOOLEAN)\n PictureUrl = Column(String(200))\n Status = Column(sqlalchemy.Integer)\n SubmissionEnd = Column(String(50))\n TaskKind = Column(sqlalchemy.Integer)\n TaskType = Column(sqlalchemy.Integer)\n Title = Column(String(50))\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass Event(Base):\n __tablename__ = 'events'\n id = Column(\n sqlalchemy.Integer, primary_key=True, nullable=False, unique=True)\n allDay = Column(sqlalchemy.BOOLEAN)\n color = Column(String(15))\n description = Column(String(1000))\n endfortip = Column(String(50))\n groupname = Column(String(50))\n location = Column(String(50))\n senderid = Column(String(10))\n start = Column(sqlalchemy.Integer)\n end = Column(sqlalchemy.Integer)\n startfortip = Column(String(50))\n startfortip = Column(String(50))\n textColor = Column(String(20))\n title = Column(String(50))\n userid = Column(String(50))\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nBase.metadata.create_all(engine)\n\nSession = sqlalchemy.orm.sessionmaker(bind=engine)\ndb = 
Session()\n","sub_path":"lib/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"137990621","text":"\n\nfrom xai.brain.wordbase.nouns._wonder import _WONDER\n\n#calss header\nclass _WONDERING(_WONDER, ):\n\tdef __init__(self,): \n\t\t_WONDER.__init__(self)\n\t\tself.name = \"WONDERING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"wonder\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_wondering.py","file_name":"_wondering.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"608247872","text":"import nltk, re, random, pymongo, itertools\nfrom nltk.tokenize import RegexpTokenizer\n\nMONGODB_SERVER = \"localhost\"\nMONGODB_PORT = 27017\nMONGODB_DB = \"buzzfeed\"\nMONGODB_COLLECTION = \"posts\"\n\n#setup pymongo\nconnection = pymongo.MongoClient(MONGODB_SERVER, MONGODB_PORT)\n\ndb = connection[MONGODB_DB]\ncollection = db[MONGODB_COLLECTION]\n\ntitles = []\nposts = collection.find({})\n\nfor post in posts:\n titles.append(post['post_title'].lower().lstrip())\n\nrand = random.randint(0, len(titles) - 100) \ntitles = titles[rand:rand+100]\n\nmin_title_length = 5\nmax_title_length = 5\nfor title in titles:\n length = len(title.split())\n\n if length > max_title_length:\n max_title_length = length\n\n if length < min_title_length:\n min_title_length = length\n\n#tokenize\ntokenizer = RegexpTokenizer(r\"\\w+[^\\w\\s]?\\w+\")\nstarting_words = []\ntokenized_titles = []\nfor title in titles:\n tokenized = tokenizer.tokenize(title)\n tokenized_titles.append(tokenized)\n\n#tag\n#tagged_titles = []\n#for title in tokenized_titles:\n# tagged = nltk.pos_tag(title)\n# tagged_titles.append(tagged)\n#\n#thefile = open('tagged_titles2.txt', 'w')\n#thefile.write(str(tagged_titles))\n#thefile.close()\n\ndef generate_model(cfd, word, num=random.randint(min_title_length, max_title_length)):\n generated_title = word + ' '\n for i in range(num):\n if (cfd[word]):\n word = random.choice(cfd[word].most_common(3))[0]\n generated_title += word + ' '\n else:\n break\n return generated_title\n\nall_bigrams = []\nfirst_words = []\nfor title in tokenized_titles:\n first_words.append(title[0])\n all_bigrams.append(nltk.bigrams(title))\n\nflat_all_bigrams = list(itertools.chain(*all_bigrams))\ncfd = nltk.ConditionalFreqDist(flat_all_bigrams) \n\n","sub_path":"tag_generator.py","file_name":"tag_generator.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"355477304","text":"#!/usr/bin/env python3\n\"\"\"\nUses epsilon-greedy to determine the next action\n\"\"\"\nimport numpy as np\n\n\ndef epsilon_greedy(Q, state, epsilon):\n \"\"\"\n Returns: the next action index\n \"\"\"\n p = np.random.uniform(0, 1)\n\n if p < epsilon:\n \"\"\"\n Explore: select a random action\n \"\"\"\n action = np.random.randint(Q.shape[1])\n else:\n \"\"\"\n Exploit: select the action with max value (future reward)\n \"\"\"\n action = np.argmax(Q[state, :])\n\n return action\n","sub_path":"reinforcement_learning/0x00-q_learning/2-epsilon_greedy.py","file_name":"2-epsilon_greedy.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"64236896","text":"# -*- coding: utf-8 -*-\nfrom __future__ import 
unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('embarazos', '0011_auto_20150406_1514'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='embarazo',\n name='padre_apellido_materno',\n field=models.CharField(max_length=50, null=True, verbose_name=b'Padre apellido materno', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='embarazo',\n name='padre_apellido_paterno',\n field=models.CharField(max_length=50, null=True, verbose_name=b'Padre apellido paterno', blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='embarazo',\n name='padre_nombres',\n field=models.CharField(max_length=50, null=True, verbose_name=b'Padre nombres', blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"apps/embarazos/migrations/0012_auto_20150817_0955.py","file_name":"0012_auto_20150817_0955.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"338195916","text":"#!/usr/bin/env vtkpython\n\nimport vtk\n\ndata = vtk.vtkXMLImageDataReader()\ndata.SetFileName(\"fuel.vti\")\n\nplane = vtk.vtkPlane()\nplane.SetOrigin(32, 32, 32)\nplane.SetNormal(0, 0, -1)\n\nclip = vtk.vtkClipVolume()\nclip.SetClipFunction(plane)\nclip.SetInputConnection(data.GetOutputPort())\n\ncut = vtk.vtkCutter()\ncut.SetInputConnection(data.GetOutputPort())\ncut.SetCutFunction(plane)\n\niso = vtk.vtkContourFilter()\niso.SetInputConnection(cut.GetOutputPort())\niso.SetNumberOfContours(8)\niso.GenerateValues(8, 10, 255)\n\nlut = vtk.vtkLookupTable()\nlut.SetNumberOfColors(64)\nlut.SetHueRange(0.66,0)\nlut.SetValueRange(1,1)\nlut.SetSaturationRange(1,1)\nlut.Build()\n\nmapper = vtk.vtkPolyDataMapper()\nmapper.SetInputConnection(iso.GetOutputPort())\nmapper.SetScalarRange(0,255)\nmapper.SetLookupTable(lut)\nmapper.SetColorModeToMapScalars()\n\nactor = vtk.vtkActor()\nactor.SetMapper(mapper)\n\niso2 = vtk.vtkContourFilter()\niso2.SetInputConnection(clip.GetOutputPort())\niso2.ComputeNormalsOn()\niso2.SetNumberOfContours(1)\niso2.SetValue(0, 10)\n\nmapper2 = vtk.vtkPolyDataMapper()\nmapper2.SetInputConnection(iso2.GetOutputPort())\nmapper2.ScalarVisibilityOff()\n\nactor2 = vtk.vtkActor()\nactor2.SetMapper(mapper2)\nactor2.GetProperty().SetColor(0, 0, 1)\n\nren1 = vtk.vtkRenderer()\nren1.SetBackground(1, 1, 1)\nren1.AddActor(actor)\nren1.AddActor(actor2)\n\nrenWin = vtk.vtkRenderWindow()\nrenWin.AddRenderer(ren1)\nrenWin.SetSize(500, 500)\n\niren = vtk.vtkRenderWindowInteractor()\niren.SetRenderWindow(renWin)\n\nstyle = vtk.vtkInteractorStyleTrackballCamera()\niren.SetInteractorStyle(style)\n\niren.Initialize()\niren.Start()\n","sub_path":"compv_2/comp_home/archive/Composite.py","file_name":"Composite.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"169348381","text":"# ****************************************************************************\n# diffusion.py\n# by Walter Dal'Maz Silva\n# 08th March 2017\n# ****************************************************************************\n\nimport numpy as np\nimport scipy.integrate\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\n# ****************************************************************************\n# Unit conversion factors and constants\n# ****************************************************************************\n\npc = 1.0e-02 
# [100/%] \nmm = 1.0e-03 # [m/mm]\nkJ = 1.0e+03 # [J/kJ]\nhr = 3.6e+03 # [s/h]\nRg = 8.314472 # [J/mole/K]\n \n# ****************************************************************************\n# function laplacian\n# ****************************************************************************\n\ndef laplacian(u):\n \"\"\" laplacian\n \n @param u a numpy.array of dimension (N,1)\n @return second derivative of u with dimension (N-2,1)\n \"\"\"\n return u[2:] - 2.0 * u[1:-1] + u[:-2]\n\n# ****************************************************************************\n# class diffusion\n# ****************************************************************************\n\nclass diffusion:\n def __init__(self, y0, yb, length, npoint, T, type='CONSTANT'):\n \"\"\" diffusion constructor: builds the physics of interstitial\n diffusion in solids. This class considers the carbon-iron\n system without taking into account the composition-dependent\n diffusion coefficient. Data taken from:\n J. Slycke and T. Ericsson\n J. Heat Treating (1981) No. 2 Vol. 2 p. 97 \n \n @param y0 initial mass fraction\n @param yb boundary condition mass fraction\n @param length system thickness in meters\n @param npoint number of points in discretization\n @param T system temperature in kelvin\n \"\"\"\n # Discretize space using double precision np.float128.\n # 'retstep' forces np.linspace to return the grid spacing.\n config_linspace = {'retstep': True, 'dtype': np.float64}\n self.x, dx = np.linspace(0.0, length, npoint, **config_linspace)\n \n # Attribute a composition for each point in the space.\n # Since we have a constant inicial mass fraction in this\n # problem it is possible to create an array of ones and\n # multiply this by the inicial concentration.\n self.y = y0 * np.ones(npoint, dtype=np.float64)\n self.yb = yb\n \n # Since this is a simple solver for constant temperature\n # and composition-independent diffusion coefficient, compute\n # its value only once here in the class constructor.\n D = 4.84e-05 * np.exp(- 155.0 * kJ /(Rg * T))\n \n # Apply boundary conditions after defining type.\n # Here we give and example of exception handling in Python.\n self.type = type if type in ['CONSTANT','CLOSED'] else None\n try:\n message = 'Unknow boundary type: {}'.format(type)\n assert self.type is not None, message\n except (AssertionError) as err:\n raise SystemExit(err)\n \n # This quantities appear in the finite difference discretization\n # and we compute it only once here.\n self.Fact = D / (dx * dx)\n\n \n def __call__(self, t, y):\n \"\"\" diffusion function object\n \n @param t independent variable, time in seconds here\n @param y current mass fraction of diffusing species\n @return right hand side of Fick's second law\n \"\"\"\n # Constant concentration boundary condition: here we are assuming\n # we have a plate with same condition on both surfaces. 
This approach\n # is interesting for considering real cases where we can face the\n # 'shock' of diffusing profiles.\n if self.type == 'CONSTANT':\n # Fixed boundary concentrations.\n y[0] = y[-1] = self.yb \n \n # Zero-valued derivatives in boundaries.\n rhs = np.hstack((0.0, laplacian(y) * self.Fact, 0.0))\n \n # Closed system boundary condition: assuming a virtual node outside\n # the system we can compute the time-derivative of the first node.\n if self.type == 'CLOSED':\n # Derivative on the first node.\n bn = 2.0 * (y[1] - y[0])\n \n # Symmetry on both boundaries.\n rhs = self.Fact * np.hstack((bn, laplacian(y), bn))\n \n # Return right-hand side.\n return rhs\n\n# ****************************************************************************\n# class integDiffusion\n# ****************************************************************************\n\nclass integDiffusion:\n def __init__(self, diff):\n \"\"\" integDiffusion constructor: use VODE to integrate the physics\n of a `diffusion` object.\n \n @param diff a diffusion object\n \"\"\"\n config = {'atol': 1.0e-12, 'rtol': 1.0e-06, 'method': 'bdf',\n 'with_jacobian': True, 'max_step': 10.0}\n\n self.diff = diff\n self.solver = scipy.integrate.ode(self.diff)\n self.solver.set_integrator('vode', **config)\n self.solver.set_initial_value(self.diff.y, 0.0)\n \n\n def advance(self, tend, tstep=10.0):\n \"\"\" advance: advance 'tend' seconds the solution of the\n related `diffusion` object.\n \n @param tend physical integration time in seconds\n @param tstep physical advance step in seconds\n @return the integrated diffusion object\n \"\"\"\n count = 0\n while self.solver.successful() and self.solver.t < tend:\n self.solver.integrate(self.solver.t + tstep)\n self.diff.y = self.solver.y\n if not count % 100:\n print('Step at time {:15.6e}'.format(self.solver.t))\n count += 1\n \n return self.diff\n \n# ****************************************************************************\n# __main__ body of the script\n# ****************************************************************************\n\nif __name__ == '__main__':\n y0 = 0.1 * pc # mass fraction initial\n yb = 1.4 * pc # mass fraction boundary\n T = 1173.0 # temperature [K]\n length = 3.0 * mm # system size [m]\n npoint = 300 # number of grid points\n\n # Create the 'raw material' with homogeneous profile.\n diff = diffusion(y0, yb, length, npoint, T, type='CONSTANT')\n \n # Create a solver and advance system state for 2 hours.\n solv = integDiffusion(diff)\n diff = solv.advance(2.0 * hr, 100.0)\n\n # Plot current state.\n plt.plot(diff.x/mm, diff.y/pc, label = 'Enriquecimento')\n \n # Change boundary type.\n diff.type = 'CLOSED'\n \n # Create a new solver and call it.\n diff = integDiffusion(diff).advance(3.0 * hr, 100.0)\n \n # Plot final state.\n plt.plot(diff.x/mm, diff.y/pc, label = 'Difusão')\n \n # Plot setup and save.\n ax = plt.subplot(1, 1, 1)\n ax_format = lambda z, p : format(z,'.1f').replace('.',',')\n ax.get_yaxis().set_major_formatter(ticker.FuncFormatter(ax_format))\n ax.get_xaxis().set_major_formatter(ticker.FuncFormatter(ax_format))\n \n plt.xlabel('Posição (mm)')\n plt.ylabel('Percentual em massa de carbono (%C)')\n plt.xlim(0.0, 1.4)\n plt.ylim(0.0, 1.4)\n plt.legend(loc=1)\n plt.savefig('diffusion-example.png', dpi=300)\n \n# ****************************************************************************\n# EOF\n# 
****************************************************************************\n","sub_path":"python/diffusion/diffusion.py","file_name":"diffusion.py","file_ext":"py","file_size_in_byte":7739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"412314968","text":"#%%\nimport numpy as np\nimport cv2 as cv\nimport matplotlib.pyplot as plt\nimport os\n\nfaceCascade = cv.CascadeClassifier('haarcascades\\haarcascade_frontalface_default.xml')\n# Read image from your local file system\noriginal_image = cv.imread('001_expresion_frown_3.bmp',0)\n\nplt.figure(figsize=(20,10))\nplt.imshow(original_image,cmap='gray')\nplt.show()\n# Detect faces\nfaces = faceCascade.detectMultiScale(\noriginal_image,\nscaleFactor=1.1,\nminNeighbors=5,\nflags=cv.CASCADE_SCALE_IMAGE\n)\n# For each face\nfor (x, y, w, h) in faces: \n # Draw rectangle around the face\n sub_face = original_image[y:y+h, x:x+w]\n cv.imwrite(\"crop1.jpg\",sub_face)\n cv.rectangle(original_image, (x, y), (x+w, y+h), (255, 255, 255), 3)\n\nplt.figure(figsize=(20,10))\nplt.imshow(original_image,cmap='gray')\nplt.show()\n#%%","sub_path":"teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"415878480","text":"import sqlite3\nfrom time import sleep\nfrom datetime import datetime\nimport lcd\nimport dht\nimport os\n\nimport ControlFan\nimport ControlLED\nimport RPi.GPIO as GPIO\n\nconf = {}\n\ndef readConfig():\n conn = sqlite3.connect('conf.db')\n cur = conn.cursor()\n cur.execute('SELECT * FROM Config')\n row = cur.fetchone()\n conf['auto'], conf['LED'], conf['fan'], conf['start'], conf['end'], conf['pivot'] = row\n\ndef updateConfig(auto=None, LED=None, fan=None, start=None, end=None, pivot=None):\n pass\n \n\ntry:\n while True:\n readConfig()\n hud, temp = dht.getData()\n lcd.lcd_string(\"Auto: \" + conf['auto'], lcd.LCD_LINE_1)\n lcd.lcd_string(\"Temp: \" + str(temp) + \" C\", lcd.LCD_LINE_2)\n if conf['auto'] == 'On':\n if temp >= conf['pivot']:\n if ControlFan.getState() == 0:\n os.system('python3 /home/pi/SmartHome/ControlFan.py on')\n print('Turned on')\n else:\n if ControlFan.getState() == 1:\n os.system('python3 /home/pi/SmartHome/ControlFan.py off')\n print('Turned off')\n now = datetime.now().strftime('%H:%M')\n if conf['start'] < conf['end']:\n if conf['start'] <= now <= conf['end']:\n if ControlLED.getState() == 0:\n os.system('python3 /home/pi/SmartHome/ControlLED.py on')\n else:\n if ControlLED.getState() == 1:\n os.system('python3 /home/pi/SmartHome/ControlLED.py off')\n else:#Over night\n if now < conf['end'] or now > conf['start']:\n if ControlLED.getState() == 0:\n os.system('python3 /home/pi/SmartHome/ControlLED.py on')\n else:\n if ControlLED.getState() == 1:\n os.system('python3 /home/pi/SmartHome/ControlLED.py off')\n sleep(1)\nexcept KeyboardInterrupt:\n lcd.lcd_byte(0x01, lcd.LCD_CMD)\n \n","sub_path":"auto.py","file_name":"auto.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"478952254","text":"#!/bin/python3\n\ndef testpalind(word):\n word = word.lower()\n i,j=0, len(word) - 1\n for l in word:\n print ('i : j', i, j)\n if word[i] == word[j]:\n i += 1\n j -= 1\n else:\n return False\n if i == j or j < i:\n return True\n\nif __name__=='__main__':\n 
print(testpalind(input()))\n\n","sub_path":"hackerrank/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"197077482","text":"# -*- coding:utf-8 -*-\n# python version= python3.X\n# code lines count about 70\nimport os\nfrom threading import Thread\nfrom customTools.respDownloader import parse_target_urls, get_element\nfrom customTools.disguiser import user_agent\nfrom customTools.loggerHome import novellogger\nfrom customTools.databaseHome import novel_redis_client\nfrom customTools.queueHome import chuangShiNovelUrlQueue, chuangShiELementQueue, chuangShiDataQueue\nfrom dataHandler.NovelDataHandler_chuangShi import chuangShi_data_handler\n\nclass ChuangShiNovelSpider(object):\n \"\"\"\n this is a spiders class for novel information by chang shi wen xue website\n \"\"\"\n\n def __init__(self):\n self.start_url = \"http://chuangshi.qq.com/bang/tj/kh-week.html\"\n\n def get_urls(self):\n \"\"\"\n by parse start url to get detail urls for each book\n :return: detail url list\n \"\"\"\n element = get_element(targetUrl=self.start_url, workLogger=novellogger, headers={\"User-Agent\": user_agent()})\n trs = element.xpath('.//tbody[@id=\"rankList\"]//tr')[1:]\n for tr in trs:\n link = tr.xpath(\".//a[@target='_blank']/@href\")\n if len(link) > 0:\n chuangShiNovelUrlQueue.put(link[0])\n\n def parse_detail_url(self):\n\n \"\"\"\n :param urlQueue: to give target url\n :param elementQueue: to save the element of the page by parse url\n :param workLogger: to record the work information\n :param redisClient: save url finger print\n :param redisKey: the redis data save key for url finger print\n :return: None\n \"\"\"\n while True:\n # every time to parse url use different User-Agent\n headers = {\"User-Agent\": user_agent()}\n\n # use function import from customTools.respDownloader\n parse_target_urls(urlQueue=chuangShiNovelUrlQueue,\n elementQueue=chuangShiELementQueue,\n headers=headers,\n workLogger=novellogger,\n redisClient=novel_redis_client,\n redisKey=\"novel_url_finger\")\n # print(\"url queue size:\", chuangShiNovelUrlQueue.qsize())\n\n def run(self):\n\n # parse start url and get detail urls ,add this detail urls into url queue\n self.get_urls()\n # try parse all detail url and add target element into element queue\n for i in range(10):\n t = Thread(target=self.parse_detail_url)\n t.setDaemon(True)\n t.setName(\"chuangShiUrlParse{}\".format(i))\n t.start()\n # start data handlers\n chuangShi_data_handler()\n\n # join all queue\n for q in [chuangShiNovelUrlQueue, chuangShiELementQueue, chuangShiDataQueue]:\n q.join()\n\n\nif __name__ == '__main__':\n chuangshi = ChuangShiNovelSpider()\n chuangshi.run()\n\n","sub_path":"novelSpider/chuangshi.py","file_name":"chuangshi.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"97078186","text":"import argparse\nimport logging\nimport os\n\nimport pymia.deeplearning.logging as log\n\nimport mialab.configuration.config as cfg\nimport mialab.data.handler as hdlr\nimport mialab.data.split as split\nimport mialab.model.factory as mdl\nimport mialab.utilities.filesystem as fs\nimport mialab.utilities.training as train\n\n\ndef main(config_file: str):\n config = cfg.load(config_file, cfg.Configuration)\n\n # set up directories and logging\n model_dir, result_dir = fs.prepare_directories(config_file, cfg.Configuration,\n lambda: 
fs.get_directory_name(config))\n config.model_dir = model_dir\n config.result_dir = result_dir\n print(config)\n\n logging.basicConfig(filename=os.path.join(config.model_dir, 'logging.log'), level=logging.INFO, filemode='a')\n\n # load train and valid subjects from split file (also test but it is unused)\n subjects_train, subjects_valid = split.load_split(config.split_file)\n print('Train subjects:', subjects_train)\n print('Valid subjects:', subjects_valid)\n\n # set up data handling\n data_handler = hdlr.SliceWiseDataHandler(config, subjects_train, subjects_valid, None)\n\n # extract a sample for model initialization\n data_handler.dataset.set_extractor(data_handler.extractor_train)\n data_handler.dataset.set_transform(data_handler.extraction_transform_train)\n sample = data_handler.dataset[0]\n\n model = mdl.get_model(config)(sample, config)\n logger = log.TorchLogger(config.model_dir,\n model.epoch_summaries(), model.batch_summaries(), model.visualization_summaries())\n\n trainer = train.SegmentationTrainer(data_handler, logger, config, model)\n trainer.train()\n\n\nif __name__ == '__main__':\n \"\"\"The program's entry point.\n\n Parse the arguments and run the program.\n \"\"\"\n\n parser = argparse.ArgumentParser(description='Deep learning for magnetic resonance fingerprinting')\n\n parser.add_argument(\n '--config_file',\n type=str,\n default='./config/config.json',\n help='Path to the configuration file.'\n )\n\n args = parser.parse_args()\n main(args.config_file)\n","sub_path":"bin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"458299640","text":"import csv\nimport numpy as np\n\n\ndef load_data(filename):\n \"\"\"\n Loads data from given file\n\n Parameters:\n filename (string): file to load\n\n Returns:\n array: data\n \"\"\"\n\n with open(filename, 'r') as f:\n reader = csv.reader(f, delimiter=',')\n data = list(reader)\n data = np.array(data).astype(\"float\")\n\n return data","sub_path":"neuralnetwork/scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"156052780","text":"A = int(input())\nB = int(input())\nC = int(input())\nD = int(input())\nE = int(input())\ns1 = A * B\ns2 = A * C\ns3 = C * B\ns4 = D * E\n\nf1 = (s1 <= s4)\nf2 = (s2 <= s4)\nf3 = (s3 <= s4)\nff1 = ((A <= D) and (B <= E))\nff2 = ((A <= D) and (A <= E))\nff3 = ((A <= D) and (C <= E))\nff4 = ((C <= D) and (A <= E))\nff5 = ((B <= D) and (C <= E))\nff6 = ((C <= D) and (B <= E))\n\nif (f1 or f2 or f3) and (ff1 or ff2 or ff3 or ff4 or ff5 or ff6):\n print('YES')\nelse:\n print('NO')\n","sub_path":"Python_MINNE/Course_Coursera_Python_basics/PycharmProjectss/ass2_16.py","file_name":"ass2_16.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"474838269","text":"#!usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport random\nfrom pathlib import Path\nimport warnings\nimport json\nfrom PIL import Image, ImageFile\nfrom tqdm import tqdm\n\nimport torch\nfrom torchvision.datasets import VisionDataset\nfrom .utils import download_url, download_urls, get_fname\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\n\nclass OpenFire(VisionDataset):\n \"\"\"Wildfire image Dataset.\n\n Args:\n root (string): Root directory of dataset where ``OpenFire/processed/training.pt``\n and 
``OpenFire/processed/test.pt`` exist.\n train (bool, optional): If True, creates dataset from ``training.pt``,\n otherwise from ``test.pt``.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n threads (int, optional): If download is set to True, use this amount of threads\n for downloading the dataset.\n valid_pct (float, optional): Percentage of training set used for validation.\n \"\"\"\n\n url = 'https://gist.githubusercontent.com/frgfm/f53b4f53a1b2dc3bb4f18c006a32ec0d/raw/99e5be2afd957b2da841f0adf8c5dfa47fe57166/openfire_binary.json'\n training_file = 'training.pt'\n test_file = 'test.pt'\n classes = [False, True]\n seed = 42\n\n def __init__(self, root, train=True, transform=None, target_transform=None,\n download=False, threads=16, valid_pct=None):\n super(OpenFire, self).__init__(root, transform=transform,\n target_transform=target_transform)\n self.train = train # training set or test set\n\n if download:\n self.download(threads, valid_pct)\n\n if not self._check_exists(train):\n raise RuntimeError('Dataset not found.' +\n ' You can use download=True to download it')\n\n if self.train:\n data_file = self.training_file\n else:\n data_file = self.test_file\n self.data = torch.load(self._root.joinpath(self._processed, data_file))\n\n def __getitem__(self, idx):\n \"\"\" Getter function\n\n Args:\n index (int): Index\n Returns:\n img (torch.Tensor): image tensor\n target (int): dictionary of bboxes and labels' tensors\n \"\"\"\n\n # Load image\n img = Image.open(self._root.joinpath(self.data[idx]['path']), mode='r').convert('RGB')\n # Load bboxes & encode label\n target = self.data[idx]['target']\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target\n\n @property\n def _root(self):\n return Path(self.root)\n\n @property\n def _raw(self):\n return Path(self.__class__.__name__, 'raw')\n\n @property\n def _processed(self):\n return Path(self.__class__.__name__, 'processed')\n\n @property\n def class_to_idx(self):\n return {_class: i for i, _class in enumerate(self.classes)}\n\n def _check_exists(self, train=True):\n if train:\n return self._root.joinpath(self._processed, self.training_file).is_file()\n else:\n return self._root.joinpath(self._processed, self.test_file).is_file()\n\n def download(self, threads=None, valid_pct=None):\n \"\"\"Download the OpenFire data if it doesn't exist in processed_folder already.\n\n Args:\n threads (int, optional): Number of threads to use for dataset downloading.\n valid_pct (float, optional): Percentage of training set used for validation.\n \"\"\"\n\n if self._check_exists(train=True) and self._check_exists(train=False):\n return\n\n self._root.joinpath(self._raw).mkdir(parents=True, exist_ok=True)\n self._root.joinpath(self._processed).mkdir(parents=True, exist_ok=True)\n\n # Download annotations\n download_url(self.url, self._root.joinpath(self._raw), filename=self.url.rpartition('/')[-1], verbose=False)\n with open(self._root.joinpath(self._raw, self.url.rpartition('/')[-1]), 'rb') as f:\n annotations = json.load(f)\n\n # Download actual images\n training_set, test_set = [], []\n img_folder = self._root.joinpath(self._raw, 
'images')\n img_folder.mkdir(parents=True, exist_ok=True)\n unavailable_idxs = 0\n # Prepare URL and filenames for multi-processing\n entries = [(a['url'], f\"{idx:06}.{get_fname(a['url']).rpartition('.')[-1]}\")\n for idx, a in enumerate(annotations)]\n # Use multiple threads to speed up download\n download_urls(entries, img_folder, threads=threads)\n # Verify downloads\n for idx, annotation in enumerate(annotations):\n img_path = self._raw.joinpath('images', entries[idx][1])\n if self._root.joinpath(img_path).is_file():\n # Encode target\n target = self.class_to_idx[annotation['target']]\n # Aggregate img path and annotations\n data = dict(path=img_path, target=target)\n # Add it to the proper set\n if annotation.get('is_test', False):\n test_set.append(data)\n else:\n training_set.append(data)\n else:\n unavailable_idxs += 1\n # HTTP Errors\n if unavailable_idxs > 0:\n warnings.warn((f'{unavailable_idxs}/{len(annotations)} samples could not be downloaded. Please retry later.'))\n\n # Override current train/test split\n if isinstance(valid_pct, float):\n full_set = training_set + test_set\n # Local seed to avoid disturbing global functions\n random.Random(self.seed).shuffle(full_set)\n valid_size = int(valid_pct * len(full_set))\n training_set, test_set = full_set[:-valid_size], full_set[-valid_size:]\n\n # save as torch files\n with open(self._root.joinpath(self._processed, self.training_file), 'wb') as f:\n torch.save(training_set, f)\n # in case test split if not available\n if len(test_set) > 0:\n with open(self._root.joinpath(self._processed, self.test_file), 'wb') as f:\n torch.save(test_set, f)\n else:\n warnings.warn(\"Unable to find train/test split! All samples were assigned to train set.\")\n\n print('Done!')\n\n def __len__(self):\n return len(self.data)\n\n def extra_repr(self):\n return \"Split: {}\".format(\"Train\" if self.train is True else \"Test\")\n","sub_path":"pyronear/datasets/openfire.py","file_name":"openfire.py","file_ext":"py","file_size_in_byte":6904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"113857494","text":"import kashgari\nimport argparse\nimport os\nfrom dirs import keras_models_dir\n\nparser = argparse.ArgumentParser(description=\"your script description\") \nparser.add_argument('--model_name', '-n', required=True, type=str)\nargs = parser.parse_args()\n\nmodel_name = args.model_name\nmodel_path = os.path.join(keras_models_dir, model_name)\n\nloaded_model = kashgari.utils.load_model(model_path)\nloaded_model.tf_model.summary()\nwhile True:\n text = input('sentence: ')\n r = loaded_model.predict([[char for char in text]])\n print(r)\n per, loc, org = '', '', ''\n\n for i, t in enumerate(r[0]):\n if t in ('B-PER', 'I-PER', '\\tI-PER'):\n per += ',' + text[i] if (t == 'B-PER' and not per == '') else text[i]\n if t in ('B-ORG', 'I-ORG'):\n org += ',' + text[i] if (t == 'B-ORG' and not org == '') else text[i]\n if t in ('B-LOC', 'I-LOC'):\n loc += ',' + text[i] if (t == 'B-LOC' and not loc == '') else text[i]\n\n print(['person: ' + per, 'location: ' + loc, 'organzation: ' + org])\n","sub_path":"model_predict.py","file_name":"model_predict.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"66269153","text":"from api_models.sk_metro_area import MetroArea\nfrom api_models.sk_artist import Artist\nfrom api_models.sk_city import City\nfrom api_models.sk_event import Event\nfrom api_models.sk_location 
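The model_predict record above reassembles person, location, and organization strings from per-character BIO tags, separating distinct entities with commas. A self-contained sketch of that aggregation on an invented sentence and tag sequence:

text = "张三在北京工作"
tags = ['B-PER', 'I-PER', 'O', 'B-LOC', 'I-LOC', 'O', 'O']

def collect(text, tags, kind):
    # Concatenate characters tagged B-<kind>/I-<kind>; a fresh B- tag after a
    # previous entity inserts a comma, mirroring the loop in the record above.
    out = ''
    for ch, tag in zip(text, tags):
        if tag == 'B-' + kind:
            out += (',' + ch) if out else ch
        elif tag == 'I-' + kind:
            out += ch
    return out

print('person:', collect(text, tags, 'PER'))    # person: 张三
print('location:', collect(text, tags, 'LOC'))  # location: 北京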
import Location\nfrom api_models.sk_performance import Performance\nfrom api_models.sk_simplified_venue import SimplifiedVenue\nfrom api_models.sk_venue import Venue\nimport logging\n\nlog = logging.getLogger()\n\nclass Factory(object):\n\n def __init__(self):\n self.event_count = 0\n self.venue_count = 0\n self.location_count = 0\n self.artist_count = 0\n self.performance_count = 0\n self.city_count = 0\n self.metro_count = 0\n\n\n\n def build_artist(self, artist_data: dict) -> Artist or None:\n try:\n new_artist = Artist(\n display_name = artist_data['displayName'],\n uri = artist_data['uri'],\n sk_id = artist_data['id'])\n self.artist_count += 1\n return new_artist\n except Exception as exc:\n log.log(msg=f'Exception {exc} building artist with: {artist_data}', level=Warning)\n return None\n\n\n\n def build_city(self, city_data: dict) -> City or None:\n try:\n new_city = City(\n country = city_data['country']['displayName'],\n display_name = city_data['displayName'],\n sk_id = city_data['id'],\n uri = city_data['uri'])\n self.city_count += 1\n return new_city\n except Exception as exc:\n log.log(msg=f'Exception {exc} building city with: {city_data}', level=Warning)\n return None\n\n\n\n def build_event(self, event_data: dict) -> Event or None:\n try:\n event_location = self.build_location(event_data['location'])\n event_venue = self.build_simplified_venue(event_data['venue'])\n # event_perf_list = self.build_performances(event_data['performance'])\n new_event = Event(\n display_name = event_data['displayName'],\n event_type = event_data['type'],\n location = event_location,\n # performance = event_perf_list,\n popularity = event_data['popularity'],\n sk_id = event_data['id'],\n start = event_data['start'],\n status = event_data['status'],\n uri = event_data['uri'],\n venue = event_venue)\n self.event_count += 1\n return new_event\n except Exception as exc:\n log.log(msg=f'Exception {exc} building event', level=Warning)\n return None\n\n\n\n def build_location(self, location_data: dict) -> Location or None:\n try:\n new_location = Location(\n city = location_data['city'],\n lat = location_data['lat'],\n lng = location_data['lng'])\n self.location_count += 1\n return new_location\n except Exception as exc:\n log.log(msg=f'Exception {exc} building location with: {location_data}', level=Warning)\n return None\n\n\n\n def build_metro_area(self, metro_area_data: dict) -> MetroArea or None:\n try:\n new_metro_area = MetroArea(\n sk_id = metro_area_data['id'],\n uri = metro_area_data['uri'],\n display_name = metro_area_data['displayName'],\n country = metro_area_data['country'])\n self.metro_count += 1\n return new_metro_area\n except Exception as exc:\n log.log(msg=f'Exception {exc} building metro area with: {metro_area_data}', level=Warning)\n return None\n\n\n\n def build_performances(self, performance_data: [{}]) -> [Performance]:\n performance_list = []\n for performance in performance_data:\n try:\n new_performance = Performance(\n artist = self.build_artist(performance_data['artist']),\n billing = performance['billing'],\n billing_index = performance['billingIndex'],\n display_name = performance['displayName'],\n sk_id = performance['id'])\n performance_list.append(new_performance)\n self.performance_count += 1\n except Exception as exc:\n log.log(msg=f'Exception {exc} building performance with: {performance}', level=Warning)\n pass\n return performance_list\n\n\n\n def build_simplified_venue(self, simp_venue_data: dict) -> SimplifiedVenue or None:\n try:\n new_simplified_venue = 
SimplifiedVenue(\n display_name = simp_venue_data['displayName'],\n sk_id = simp_venue_data['id'],\n uri = simp_venue_data['uri'],\n metro_area = simp_venue_data['metroArea'] if simp_venue_data['metroArea'] else None)\n self.venue_count += 1\n return new_simplified_venue\n except Exception as exc:\n log.log(msg=f'Exception {exc} building simplified venue with {simp_venue_data}', level=Warning)\n return None\n\n\n\n def build_venue(self, venue_data: dict) -> Venue or None:\n description = venue_data['description'] if venue_data.keys().__contains__('description') else None\n lat = venue_data['lat'] if venue_data.keys().__contains__('lat') else None\n lng = venue_data['lng'] if venue_data.keys().__contains__('lng') else None\n phone = venue_data['phone'] if venue_data.keys().__contains__('phone') else None\n street = venue_data['street'] if venue_data.keys().__contains__('street') else None\n venue_capacity = venue_data['capacity'] if venue_data.keys().__contains__('capacity') else 0\n website = venue_data['website'] if venue_data.keys().__contains__('website') else None\n zip_code = venue_data['zip'] if venue_data.keys().__contains__('zip') else None\n\n new_location = None\n if venue_data.keys().__contains__('city'):\n new_location = Location(city=venue_data['city'], lng=lng, lat=lat)\n elif venue_data.keys().__contains__('location'):\n if venue_data['location'].keys().__contains__('city'):\n new_location = Location(city=venue_data['location']['city'], lng=lng, lat=lat)\n\n try:\n new_venue = Venue(\n capacity = venue_capacity,\n description = description,\n display_name = venue_data['displayName'],\n location = new_location,\n phone_num = phone,\n sk_id = venue_data['id'],\n street = street,\n uri = venue_data['uri'],\n website = website,\n zip_code = zip_code)\n self.venue_count += 1\n return new_venue\n except Exception as exc:\n log.log(msg=f'Exception {exc} building Venue with: {venue_data}', level=Warning)\n return None\n\n\n @property\n def total_objects(self):\n return self.artist_count + self.performance_count + self.location_count + \\\n self.event_count + self.venue_count + self.metro_count\n\n\n\n\n\n\n\n","sub_path":"sk_factory.py","file_name":"sk_factory.py","file_ext":"py","file_size_in_byte":7609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"506082086","text":"import scrapy\nimport html2text\nf = open(\"/Users/skye/result.txt\",\"w\")\nclass PTTSpider(scrapy.Spider):\n\tname = \"ptt\"\n\n\tdef start_requests(self):\n\t\turls = ['https://www.ptt.cc/bbs/lesbian/index.html']\n\t\tfor url in urls:\n\t\t\tyield scrapy.Request(url=url,callback=self.parse)\n\n\tdef parse(self,response):\n\t\t#data = response.css('title::text').extract()[0]\n\t\tarticles = response.css(\"div.r-ent\")\n\t\tfor article in articles:\n\t\t\t# get title\n\t\t\ttitle = article.css(\"div.title\")\n\t\t\ttitle = title.css(\"a::text\").extract_first()\n\t\t\t# get date\n\t\t\tmeta = article.css(\"div.meta\")\n\t\t\tdate = meta.css(\"div.date::text\").extract_first()\n\t\t\t# get author\n\t\t\tauthor = meta.css(\"div.author::text\").extract_first()\n\n\t\t\t#print(title,date,author)\n\t\t\t\n\t\t\t#f.write(str(date))\n\t\t\t#f.write(str(title))\n\t\t\t#f.write(str(author))\n\t\t\t# get url\n\t\t\ttitle = article.css(\"div.title\")\n\t\t\turl = title.css(\"a::attr(href)\").extract_first()\n\t\t\turl = 'https://www.ptt.cc' + url\n\t\t\tyield scrapy.Request(url, callback=self.parse_content)\n\t\t\t#print(content)\n\t\t#with 
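build_venue in the Factory record above guards every optional field with venue_data.keys().__contains__(...); dict.get expresses the same lookup-with-default in a single call. A minimal sketch over an invented venue dict:

venue_data = {"displayName": "Paradiso", "id": 123, "uri": "http://example.com/v/123"}

# dict.get returns a default instead of raising KeyError, replacing the
# venue_data.keys().__contains__(...) guards used in build_venue above.
description = venue_data.get("description")   # None when the key is absent
capacity = venue_data.get("capacity", 0)      # explicit fallback, as in build_venue
lat, lng = venue_data.get("lat"), venue_data.get("lng")
print(description, capacity, lat, lng)        # None 0 None None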
open(\"/Users/skye/result.txt\",\"w\") as f:\n\t\t\t\t\n\tdef parse_content(self,response):\n\t\tcontent = response.xpath('//div[@id=\"main-content\"]')\n\t\tconverter = html2text.HTML2Text()\n\t\tconverter.ignore_links = True\n\t\t#print(converter.handle(content.extract()[0]))\n\t\tf.write(converter.handle(content.extract()[0]))","sub_path":"ptt/ptt/spiders/ptt_spider.py","file_name":"ptt_spider.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"364248239","text":"from random import randrange\r\n\r\n\r\ndef fisher_yates_shuffle(deck):\r\n '''\r\n Fisher-Yates shuffle: produce a permutation of an array. All permutations\r\n are sampled uniformly at random. Modern implementation by Richard\r\n Durstenfeld. Runtime is O(n), space is O(1) since in-place.\r\n '''\r\n if deck is None:\r\n return\r\n\r\n # the important check is to take care of the index range;\r\n # we want to avoid potential bias\r\n n = len(deck)\r\n for i in range(n-1):\r\n # correct index: 0 to n-i-1, leaving the first i-1 items as\r\n # scratch space\r\n swap_index = randrange(n-i)\r\n # swap items\r\n deck[i], deck[i+swap_index] = deck[i+swap_index], deck[i]\r\n\r\n return deck\r\n\r\ndef fisher_yates_sample(deck, m):\r\n '''\r\n Fisher-Yates sample: produce a subsampled permutation of an array.\r\n All permutations are sampled uniformly at random. This is akin to\r\n reservoir sampling.\r\n '''\r\n if deck is None:\r\n return\r\n\r\n if m < 0:\r\n print('Subarray size must be non-negative')\r\n return\r\n\r\n # the important check is to take care of the index range;\r\n # we want to avoid potential bias\r\n n = len(deck)\r\n t = min(m, n)\r\n\r\n for i in range(t-1):\r\n # correct index\r\n swap_index = randrange(n-i)\r\n # swap items\r\n deck[i], deck[i+swap_index] = deck[i+swap_index], deck[i]\r\n\r\n return deck[:t]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n deck_cards = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\n print(fisher_yates_shuffle(deck_cards))\r\n\r\n print(fisher_yates_sample(deck_cards, 5))","sub_path":"practice/fisher_yates.py","file_name":"fisher_yates.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"169169959","text":"#/* *** ODSATag: GraphL *** */\nclass Edge:\n # Doubly linked list node\n def __init__(self, v, w, p, n):\n self.vertex = v\n self.weight = w\n self.prev = p\n self.next = n\n\n\nclass GraphL(Graph):\n def __init__(self, n):\n self.nodeArray = [Edge(-1, -1, None, None) for i in range(n)]\n self.nodeValues = [None] * n\n self.numEdge = 0\n\n def nodeCount(self):\n return self.nodeArray.length\n\n def edgeCount(self):\n return self.numEdge\n\n def getValue(self, v):\n return self.nodeValues[v]\n\n def setValue(self, v, val):\n self.nodeValues[v] = val\n \n def find (self, v, w):\n curr = self.nodeArray[v]\n while curr.next is not None and curr.next.vertex < w:\n curr = curr.next\n return curr\n\n def addEdge(self, v, w, wgt):\n if wgt == 0:\n raise ValueError(\"Can't store weight of 0\")\n curr = self.find(v, w)\n if curr.next is not None and curr.next.vertex == w:\n curr.next.weight = wgt\n else:\n curr.next = Edge(w, wgt, curr, curr.next)\n if curr.next.next is not None:\n curr.next.next.prev = curr.next\n self.numEdge += 1\n\n def weight(self, v, w):\n curr = self.find(v, w)\n if curr.next is None or curr.next.vertex != w:\n return 0\n else:\n return curr.next.weight\n\n def removeEdge(self, v, 
w):\n curr = self.find(v, w)\n if curr.next is None or curr.next.vertex != w:\n return\n curr.next = curr.next.next\n if curr.next is not None:\n curr.next.prev = curr\n self.numEdge -= 1\n\n def hasEdge(self, v, w):\n return self.weight(v, w) != 0\n\n def neighbors(self, v):\n cnt = 0\n curr = self.nodeArray[v].next\n while curr is not None:\n cnt += 1\n curr = curr.next\n temp = [0] * cnt\n cnt = 0\n curr = self.nodeArray[v].next\n while curr is not None:\n temp[cnt] = curr.vertex\n cnt += 1\n curr = curr.next\n return temp\n#/* *** ODSAendTag: GraphL *** */\n","sub_path":"SourceCode/Python/Graphs/GraphL.py","file_name":"GraphL.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"14053780","text":"import os\r\nimport shutil\r\nimport time\r\ndef main():\r\n deletedfoldercount=0\r\n deletedfilescount=0\r\n path=\"C:\\\\Users\\\\91706\\\\Desktop\\\\atmproject\\\\example\"\r\n days=30\r\n seconds=time.time()-(days*24*60*60)\r\n if os.path.exists(path):\r\n for routefolder,folders,files in os.walk(path):\r\n if seconds>=get_file_or_folder_age(routefolder):\r\n remove_folder(routefolder)\r\n deletedfoldercount+=1\r\n break\r\n else:\r\n for folder in folders:\r\n folder_path=os.path.join(routefolder,folder)\r\n if seconds>=get_file_or_folder_age(folder_path):\r\n remove_folder(folder_path)\r\n deletedfoldercount+=1\r\n for file in files:\r\n file_path=os.path.join(routefolder,file)\r\n if seconds >= get_file_or_folder_age (file_path):\r\n remove_file(file_path)\r\n deletedfilescount+=1\r\n else:\r\n if seconds >= get_file_or_folder_age(path):\r\n remove_file(path)\r\n deletedfilescount+=1\r\n else:\r\n print(f'\"{path}\"is not found')\r\n deletedfilescount+=1\r\n print(f\"total folders deleted: {deletedfoldercount}\")\r\n print(f\"total files deleted: {deletedfilescount}\")\r\ndef remove_file(path):\r\n if not os.remove(path):\r\n print(f\"{path} is removed succesfully\")\r\n else:\r\n print(\"enable to delete the path\"+path)\r\ndef get_file_or_folder_age(path):\r\n ctime=os.stat(path).st_ctime\r\n return ctime\r\nif __name__==\"__main__\":\r\n main()\r\n\r\n","sub_path":"backfiles/backupfile.py","file_name":"backupfile.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"150591334","text":"import os\nimport re\nimport subprocess\nimport sys\nimport time\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Sequence\n\nimport docker\nimport requests\n\nimport determined_deploy\nfrom determined_common import api\n\n# This object, when included in the host config in a container creation request, tells Docker to\n# expose all host GPUs inside a container.\nGPU_DEVICE_REQUEST = {\"Driver\": \"nvidia\", \"Count\": -1, \"Capabilities\": [[\"gpu\", \"utility\"]]}\n\n\n# Patch the Docker library to support device requests, since it has yet to support them natively\n# (see https://github.com/docker/docker-py/issues/2395).\ndef _patch_docker_for_device_requests() -> None:\n _old_create_container_args = docker.models.containers._create_container_args\n\n def _create_container_args(kwargs: Any) -> Any:\n device_requests = kwargs.pop(\"device_requests\", None)\n create_kwargs = _old_create_container_args(kwargs)\n if device_requests:\n create_kwargs[\"host_config\"][\"DeviceRequests\"] = device_requests\n return create_kwargs\n\n docker.models.containers._create_container_args = 
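A quick empirical check of the uniformity claim in the fisher_yates record above: over many shuffles, each element should land in each position about equally often. Self-contained sketch; the tallies are random, so expect roughly equal counts rather than exact ones.

from collections import Counter
from random import randrange

def shuffle(deck):
    # Same Durstenfeld loop as fisher_yates_shuffle above
    n = len(deck)
    for i in range(n - 1):
        j = i + randrange(n - i)
        deck[i], deck[j] = deck[j], deck[i]
    return deck

tally = Counter(shuffle([0, 1, 2])[0] for _ in range(30000))
print(tally)  # each of 0, 1, 2 lands in slot 0 about 10000 times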
_create_container_args\n\n\n_patch_docker_for_device_requests()\n\n\ndef get_shell_id() -> str:\n args = [\"id\", \"-u\", \"-n\"]\n byte_str: str = subprocess.check_output(args, encoding=\"utf-8\")\n return byte_str.rstrip(\"\\n\").strip(\"'\").strip()\n\n\ndef _make_master_url(master_host: str, master_port: int, suffix: str = \"\") -> str:\n return \"http://{}:{}/{}\".format(master_host, master_port, suffix)\n\n\ndef get_proxy_addr() -> str:\n # The Determined proxying code relies on docker port-mapping container ports to host\n # ports, and it uses the IP address of the agent as a way to address spawned\n # docker containers. This breaks down when running in a docker compose\n # environment, because the address of the agent is not the address of the\n # docker host. As a work-around, force agents to report their IP address as the\n # IP address of the host machine.\n if \"darwin\" in sys.platform:\n # On macOS, docker runs in a VM and host.docker.internal points to the IP\n # address of this VM.\n return \"host.docker.internal\"\n else:\n # On non-macOS, host.docker.internal does not exist. Instead, grab the source IP\n # address we would use if we had to talk to the internet. The sed command\n # searches the first line of its input for \"src\" and prints the first field\n # after that.\n proxy_addr_args = [\"ip\", \"route\", \"get\", \"8.8.8.8\"]\n pattern = r\"s|.* src +(\\S+).*|\\1|\"\n s = subprocess.check_output(proxy_addr_args, encoding=\"utf-8\")\n matches = re.match(pattern, s)\n if matches is not None:\n groups: Sequence[str] = matches.groups()\n if len(groups) != 0:\n return groups[0]\n return \"\"\n\n\ndef docker_compose(\n args: List[str],\n cluster_name: str,\n env: Optional[Dict] = None,\n extra_files: Optional[List[str]] = None,\n) -> None:\n path = Path(__file__).parent.joinpath(\"docker-compose.yaml\")\n # Start with the user's environment to ensure that Docker and Docker Compose work correctly.\n process_env = dict(os.environ)\n if env is not None:\n # raise ValueError(str(env))\n process_env.update(env)\n process_env[\"INTEGRATIONS_PROXY_ADDR\"] = get_proxy_addr()\n base_command = [\"docker-compose\", \"-f\", str(path), \"-p\", cluster_name]\n if extra_files is not None:\n for extra_file in extra_files:\n base_command += [\"-f\", extra_file]\n args = base_command + args\n subprocess.run(args, env=process_env)\n\n\ndef _wait_for_master(master_host: str, master_port: int, cluster_name: str) -> None:\n for _ in range(50):\n try:\n r = api.get(_make_master_url(master_host, master_port), \"info\", authenticated=False)\n if r.status_code == requests.codes.ok:\n return\n except api.errors.MasterNotFoundException:\n pass\n print(\"Waiting for master to be available...\")\n time.sleep(2)\n\n print(\"Timed out connecting to master, but attempting to dump logs from cluster...\")\n docker_compose([\"logs\"], cluster_name)\n raise ConnectionError(\"Timed out connecting to master\")\n\n\ndef master_up(\n port: int,\n master_config_path: Path,\n master_name: str,\n version: Optional[str],\n db_password: str,\n delete_db: bool,\n autorestart: bool,\n cluster_name: str,\n) -> None:\n command = [\"up\", \"-d\"]\n extra_files = []\n if master_config_path is not None:\n master_config_path = Path(master_config_path).resolve()\n mount_yaml = Path(__file__).parent.joinpath(\"mount.yaml\").resolve()\n extra_files.append(str(mount_yaml))\n if version is None:\n version = determined_deploy.__version__\n if autorestart:\n restart_policy = \"unless-stopped\"\n else:\n restart_policy = \"no\"\n 
env = {\n \"INTEGRATIONS_HOST_PORT\": str(port),\n \"DET_MASTER_CONFIG\": str(master_config_path),\n \"DET_DB_PASSWORD\": db_password,\n \"DET_VERSION\": version,\n \"DET_RESTART_POLICY\": restart_policy,\n }\n master_down(master_name, delete_db)\n docker_compose(command, master_name, env, extra_files=extra_files)\n _wait_for_master(\"localhost\", port, cluster_name)\n\n\ndef master_down(master_name: str, delete_db: bool) -> None:\n if delete_db:\n docker_compose([\"down\", \"--volumes\", \"-t\", \"1\"], master_name)\n else:\n docker_compose([\"down\", \"-t\", \"1\"], master_name)\n\n\ndef cluster_up(\n num_agents: int,\n port: int,\n master_config_path: Path,\n cluster_name: str,\n version: Optional[str],\n db_password: str,\n delete_db: bool,\n no_gpu: bool,\n autorestart: bool,\n) -> None:\n cluster_down(cluster_name, delete_db)\n master_up(\n port=port,\n master_config_path=master_config_path,\n master_name=cluster_name,\n version=version,\n db_password=db_password,\n delete_db=delete_db,\n autorestart=autorestart,\n cluster_name=cluster_name,\n )\n for agent_number in range(num_agents):\n agent_name = cluster_name + f\"-agent-{agent_number}\"\n labels = {\"determined.cluster\": cluster_name}\n agent_up(\n master_host=\"localhost\",\n master_port=port,\n agent_name=agent_name,\n version=version,\n labels=labels,\n no_gpu=no_gpu,\n autorestart=autorestart,\n cluster_name=cluster_name,\n )\n\n\ndef cluster_down(cluster_name: str, delete_db: bool) -> None:\n master_down(master_name=cluster_name, delete_db=delete_db)\n stop_cluster_agents(cluster_name=cluster_name)\n\n\ndef logs(cluster_name: str, no_follow: bool) -> None:\n docker_compose([\"logs\"] if no_follow else [\"logs\", \"-f\"], cluster_name)\n\n\ndef agent_up(\n master_host: str,\n master_port: int,\n agent_name: str,\n version: Optional[str],\n no_gpu: bool,\n autorestart: bool,\n cluster_name: str,\n labels: Optional[Dict] = None,\n) -> None:\n if version is None:\n version = determined_deploy.__version__\n\n _wait_for_master(master_host, master_port, cluster_name)\n\n if master_host == \"localhost\":\n master_host = get_proxy_addr()\n image = \"determinedai/determined-agent:{}\".format(version)\n environment = {\n \"DET_MASTER_HOST\": master_host,\n \"DET_MASTER_PORT\": master_port,\n \"DET_AGENT_ID\": agent_name,\n }\n init = True\n volumes = [\"/var/run/docker.sock:/var/run/docker.sock\"]\n mounts = [] # type: List[str]\n if labels is None:\n labels = {}\n labels[\"ai.determined.type\"] = \"agent\"\n if autorestart:\n restart_policy = {\"Name\": \"unless-stopped\"} # type: Optional[Dict[str, str]]\n else:\n restart_policy = None\n if no_gpu:\n device_requests = None\n else:\n device_requests = [GPU_DEVICE_REQUEST]\n\n docker_client = docker.from_env()\n\n print(f\"Starting {agent_name}\")\n docker_client.containers.run(\n image=image,\n environment=environment,\n init=init,\n mounts=mounts,\n volumes=volumes,\n network_mode=\"host\",\n name=agent_name,\n detach=True,\n labels=labels,\n restart_policy=restart_policy,\n device_requests=device_requests,\n )\n\n\ndef _kill_containers(containers: docker.models.containers.Container) -> None:\n for container in containers:\n print(f\"Stopping {container.name}\")\n container.stop(timeout=20)\n print(f\"Removing {container.name}\")\n container.remove()\n\n\ndef stop_all_agents() -> None:\n docker_client = docker.from_env()\n filters = {\"label\": [\"ai.determined.type=agent\"]}\n to_stop = docker_client.containers.list(all=True, filters=filters)\n _kill_containers(to_stop)\n\n\ndef 
stop_cluster_agents(cluster_name: str) -> None:\n docker_client = docker.from_env()\n labels = [f\"determined.cluster={cluster_name}\"]\n filters = {\"label\": labels}\n to_stop = docker_client.containers.list(all=True, filters=filters)\n _kill_containers(to_stop)\n\n\ndef stop_agent(agent_name: str) -> None:\n docker_client = docker.from_env()\n filters = {\"name\": [agent_name]}\n to_stop = docker_client.containers.list(all=True, filters=filters)\n _kill_containers(to_stop)\n","sub_path":"deploy/determined_deploy/local/cluster_utils.py","file_name":"cluster_utils.py","file_ext":"py","file_size_in_byte":9301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"103036499","text":"\nclass Solution:\n def numSubseq(self, nums: List[int], target: int) -> int:\n res = 0\n n = len(nums)\n nums.sort()\n mod = 10 ** 9 + 7\n j = n - 1\n for i in range(n):\n while j>= i and nums[i] + nums[j] > target:\n j -= 1\n if j < i:\n break\n res += pow(2, j - i, mod)\n res %= mod\n\n return int(res)\n\n\n","sub_path":"LeetcodeNew/python2/LC_1498.py","file_name":"LC_1498.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"78156728","text":"def get_input():\n try:\n a = int(input(\"Enter the a value\"))\n b = int(input(\"Enter the b value\"))\n return a,b\n except ValueError as e:\n print(e)\n return get_input()\n finally:\n print(\"finally of get_input\")\n\ndef div(a,b):\n try:\n c = a/b\n return c\n except ZeroDivisionError as e:\n print(e)\n finally:\n print(\"finally of division\")\n\ndef main():\n try:\n a,b = get_input()\n c = div(a,b)\n print(c)\n except ZeroDivisionError as e:\n print(e)\n main()\n except:\n print(\"Exception is arrised\")\n main()\n finally:\n print(\"Process completed\")","sub_path":"practise/Dec 13.py","file_name":"Dec 13.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"572588360","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 15 10:44:11 2017\r\n\r\n@author: AmatVictoriaCuramIII\r\n\"\"\"\r\n\r\nfrom pandas_datareader import data\r\nimport time as t \r\nimport numpy as np\r\nimport pandas as pd\r\nfrom DatabaseGrabber import DatabaseGrabber\r\nAggregate = pd.read_pickle('RUTModADXAGGSHARPE065')\r\n# starttime = t.time() \r\nbase = 0\r\nticker = '^RUT'\r\nq = DatabaseGrabber(ticker)\r\nq2 = pd.DataFrame({'Open':[1407.22],'High':[1407.22],'Low':[1399.26],'Close':[0],'Volume':[0],\r\n'Adj Close':[1403.91]},index = ['2017-06-09 00:00:00']) #interday\r\nq = pd.concat([q,q2])\r\nq['UpMove'] = q['High'] - q['High'].shift(1)\r\nq['DownMove'] = q['Low'] - q['Low'].shift(1)\r\nq['LogRet'] = np.log(q['Adj Close']/q['Adj Close'].shift(1)) \r\nq['LogRet'] = q['LogRet'].fillna(0)\r\nq['Method1'] = q['High'] - q['Low']\r\nq['Method2'] = abs((q['High'] - q['Adj Close'].shift(1)))\r\nq['Method3'] = abs((q['Low'] - q['Adj Close'].shift(1)))\r\nq['Method1'] = q['Method1'].fillna(0)\r\nq['Method2'] = q['Method2'].fillna(0)\r\nq['Method3'] = q['Method3'].fillna(0)\r\nq['TrueRange'] = q[['Method1','Method2','Method3']].max(axis = 1)\r\nq['PDM'] = (q['High'] - q['High'].shift(1))\r\nq['MDM'] = (q['Low'].shift(1) - q['Low'])\r\nq['PDM'] = q['PDM'][q['PDM'] > 0]\r\nq['MDM'] = q['MDM'][q['MDM'] > 0]\r\nq['PDM'] = q['PDM'].fillna(0)\r\nq['MDM'] = q['MDM'].fillna(0)\r\n# counter = 0\r\nsize = len(Aggregate.iloc[0])\r\nadvice = 0\r\nfor i in 
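The LC_1498 record above counts subsequences with min + max <= target by sorting and moving two pointers: whenever nums[i] + nums[j] <= target, any subset of the j - i elements strictly between them can join nums[i], contributing 2**(j - i) subsequences. A brute-force cross-check on a tiny input (exponential, for verification only):

from itertools import combinations

def brute(nums, target):
    # Enumerate every non-empty subsequence and count those whose
    # min + max stays within target.
    count = 0
    for r in range(1, len(nums) + 1):
        for combo in combinations(nums, r):
            if min(combo) + max(combo) <= target:
                count += 1
    return count

print(brute([3, 5, 6, 7], 9))  # 4, matching numSubseq([3, 5, 6, 7], 9)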
Aggregate:\r\n# counter = counter + 1 \r\n# ratio = counter/size\r\n aa = Aggregate.loc[0,i] #numer of days for moving average window\r\n a = aa.astype(int)\r\n b = Aggregate[i].iloc[1]\r\n c = Aggregate[i].iloc[2] \r\n d = Aggregate[i].iloc[3]\r\n window = a\r\n q['AverageTrueRange'] = q['TrueRange'].rolling(window = window,\r\n center=False).sum()\r\n q['AverageTrueRange'] = ((q['AverageTrueRange'].shift(1)*(window-1\r\n ) + q['TrueRange']) / window)\r\n q['SmoothPDM'] = q['PDM'].rolling(window = window,\r\n center=False).sum()\r\n q['SmoothPDM'] = ((q['SmoothPDM'].shift(1)*(window-1\r\n ) + q['PDM']) / window)\r\n q['SmoothMDM'] = q['MDM'].rolling(window = window,\r\n center=False).sum()\r\n q['SmoothMDM'] = ((q['SmoothMDM'].shift(1)*(window-1\r\n ) + q['MDM']) / window)\r\n q['PDI'] = (100*(q['SmoothPDM']/q['AverageTrueRange']))\r\n q['MDI'] = (100*(q['SmoothMDM']/q['AverageTrueRange']))\r\n q['DIdiff'] = abs(q['PDI'] - q['MDI'])\r\n q['DIdivergence'] = q['PDI'] - q['MDI']\r\n q['DIsum'] = q['PDI'] + q['MDI']\r\n q['DX'] = (100 * (q['DIdiff']/q['DIsum']))\r\n q['DX'] = q['DX'].fillna(0)\r\n q['ADX'] = q['DX'].rolling(window = window, center = False).mean()\r\n q['ADXmean'] = q['ADX'].mean() * b\r\n q['Touch'] = np.where(q['DIdivergence'] < c, 1,0) #long signal\r\n q['Touch'] = np.where(q['DIdivergence'] > d, -1, q['Touch']) #short signal\r\n q['Sustain'] = 0\r\n q['Sustain'] = np.where(q['ADX'] > q['ADXmean'], 0, q['Sustain']) #if RSI is greater than threshold, sustain is forced to 0\r\n q['Sustain'] = np.where(q['ADX'] < q['ADXmean'], (q['Touch']*-1\r\n ), q['Sustain']) #never actually true when optimized\r\n q['Regime'] = q['Touch'] + q['Sustain']\r\n if len(q) <= 1:\r\n continue\r\n toadd = q['Regime'].iloc[-1]\r\n base = base + toadd\r\n advice = float(base/size)\r\n print(advice)\r\nprint(advice)\r\nModADXOptimal = pd.read_pickle('TLTModADXAGGOptimal')\r\naa = ModADXOptimal.iloc[0]\r\nbb = ModADXOptimal.iloc[1]","sub_path":"ModADXAdviceGiver.py","file_name":"ModADXAdviceGiver.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"21755247","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/cjld/new_jittor/jittor/python/jittor/test/test_index_op.py\n# Compiled at: 2020-03-20 04:44:53\n# Size of source mod 2**32: 2177 bytes\nimport unittest, jittor as jt, numpy as np\n\nclass TestIndexOp(unittest.TestCase):\n\n def test(self):\n assert (jt.index([2, 2], 0).data == [[0, 0], [1, 1]]).all()\n assert (jt.index([2, 2], 1).data == [[0, 1], [0, 1]]).all()\n a = jt.index([2, 2], 0)\n b = jt.index([2, 2], 1)\n c = a + b\n assert (c.data == [[0, 1], [1, 2]]).all(), c.data\n\n def test_multioutput(self):\n a, b = jt.index([2, 2])\n jt.sync([a, b])\n assert (a.data == [[0, 0], [1, 1]]).all()\n assert (b.data == [[0, 1], [0, 1]]).all(), b.data\n\n def test_multioutput2(self):\n a, b = jt.index([3, 3])\n assert (a.data == [[0, 0, 0], [1, 1, 1], [2, 2, 2]]).all()\n assert (b.data == [[0, 1, 2], [0, 1, 2], [0, 1, 2]]).all(), b.data\n a, b = jt.index([3, 3])\n c = a + b\n assert (c.data == [[0, 1, 2], [1, 2, 3], [2, 3, 4]]).all(), c.data\n\n def test_multioutput3(self):\n a, b = jt.index([3, 3])\n del a\n assert (b.data == [[0, 1, 2], [0, 1, 2], [0, 1, 2]]).all(), b.data\n\n def test_vary_shape_dep(self):\n a, = jt.where([1, 0, 1])\n b, = a.index_var()\n if not (a.uncertain_shape == 
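The ModADXAdviceGiver record above rebuilds Wilder's indicator chain (true range, smoothed directional movement, DX, ADX) for every stored parameter set. Its foundation is the true range, the widest of three candidate ranges; a compact self-contained pandas sketch on invented prices:

import pandas as pd

q = pd.DataFrame({'High': [10.0, 11.0, 12.5, 12.0],
                  'Low': [9.0, 9.5, 11.0, 10.5],
                  'Close': [9.5, 10.5, 12.0, 11.0]})
prev_close = q['Close'].shift(1)
# True range: max of today's span and the two gaps against yesterday's close
tr = pd.concat([q['High'] - q['Low'],
                (q['High'] - prev_close).abs(),
                (q['Low'] - prev_close).abs()], axis=1).max(axis=1)
print(tr.tolist())  # [1.0, 1.5, 2.0, 1.5]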
[-3] and b.uncertain_shape == [-3]):\n raise AssertionError\n assert (b.data == [0, 1]).all()\n\n def test_vary_shape_dep2(self):\n a = jt.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n index0, = jt.where(a.sum(1) > 7)\n index0 = index0.broadcast([1, 3], dims=[1])\n index1 = index0.index_var(1)\n b = a.reindex_var([index0, index1])\n assert b.uncertain_shape == [-3, 3]\n assert (b.data == [[4, 5, 6], [7, 8, 9]]).all()\n assert (index0.data == [[1, 1, 1], [2, 2, 2]]).all()\n assert (index1.data == [[0, 1, 2], [0, 1, 2]]).all()\n\n def test_doc(self):\n assert 'Index Operator' in jt.index.__doc__\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"pycfiles/jittor-1.0.0.tar/test_index_op.cpython-37.py","file_name":"test_index_op.cpython-37.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"240394537","text":"from collections import Counter\nimport itertools\nimport heapq as hq\nfrom functools import total_ordering\n\n\n@total_ordering\nclass Node:\n\n def __init__(self, character, nb_oc):\n '''\n Node class: the tree node object, which holds 4 pieces of information:\n the corresponding string, its number of occurrences, the left node and the right node\n '''\n self.character = character\n self.nb_oc = nb_oc\n self.left = None\n self.right = None\n\n def __eq__(self, other):\n '''\n Allows two Node objects to be compared.\n Comparison is needed to establish priorities in our heaps when building\n the tree. total_ordering means we only have to define __eq__, __ne__ and __lt__\n instead of all the comparators.\n '''\n if other == None:\n return -1\n elif not isinstance(other, Node):\n return -1\n return self.nb_oc == other.nb_oc\n\n def __ne__(self, other):\n return self.nb_oc != other.nb_oc\n\n def __lt__(self, other):\n return self.nb_oc < other.nb_oc\n\n def __repr__(self):\n '''\n Not strictly necessary\n '''\n return f'({self.character}, {self.nb_oc})'\n\n\nclass TreeBuilder:\n\n def __init__(self, text: str):\n self.text = text\n self.stack = []\n\n def dico_nb_oc(self):\n '''\n Returns the dictionary with the number of occurrences of each character.\n '''\n res = Counter(self.text)\n dico_nb_oc = dict(res)\n return dico_nb_oc\n\n def creer_stack(self):\n '''\n Builds a heap, essentially a stack, but the heapq module implements\n priorities more useful than FIFO or FILO. It lets us pop the two smallest\n elements, which is why comparison between Nodes had to be defined!\n '''\n dico = self.dico_nb_oc()\n for x, y in dico.items():\n node = Node(x, y)\n hq.heappush(self.stack, node)\n\n def merge_noeuds(self, node1: Node, node2: Node):\n '''\n Since we start from the frequency dictionary and build the tree upward\n from the characters with the lowest occurrence counts, we must be able to\n merge two nodes into a new node.\n '''\n merge = Node(node1.character + node2.character,\n node1.nb_oc + node2.nb_oc)\n merge.left = node1\n merge.right = node2\n return merge\n\n def tree(self):\n '''\n Builds a binary tree to perform Huffman coding on the TreeBuilder's string.\n The tree is an ordered list of nodes.\n '''\n self.creer_stack()\n final_tree = []\n while len(self.stack) > 1:\n node1 = hq.heappop(self.stack)\n node2 = hq.heappop(self.stack)\n hq.heappush(self.stack, self.merge_noeuds(node1, node2))\n final_tree.append(node1)\n final_tree.append(node2)\n a = hq.heappop(self.stack)\n final_tree.append(a)\n self.stack = [] # so that repeated calls on the same TreeBuilder object do not distort anything\n return final_tree\n\n\npass\n\n\nclass Codec:\n\n def __init__(self, tree):\n self.encodage = {}\n self.reverse = {}\n # copy so we do not modify the original.\n self.tree = [element for element in tree]\n\n def encode_prelim(self, noeud, encodage):\n '''\n Walks the codec's binary tree to build the encoding.\n A recursive algorithm fills two dictionaries. The first maps the \"base\"\n characters, i.e. those of length 1, to their code; the second holds\n the inverse mapping.\n '''\n if noeud is None: # if not noeud\n return\n if noeud is not None: # if noeud\n if len(noeud.character) == 1:\n self.encodage[noeud.character] = encodage\n self.reverse[encodage] = noeud.character\n self.encode_prelim(noeud.left, encodage + '0')\n self.encode_prelim(noeud.right, encodage + '1')\n\n def encode(self, text: str):\n self.encode_prelim(self.tree[-1], '')\n coded = ''\n for character in text:\n coded += self.encodage[character]\n return coded # could have been stored as an attribute for encode_bin\n\n def decode(self, coded_text: str):\n decoded = ''\n code = ''\n for bit in coded_text:\n code += bit\n if code in self.reverse:\n decoded += self.reverse[code]\n code = ''\n return decoded\n\n def encode_bin(self, text: str):\n '''\n The idea is that Python stores data in bytes by default. To really compress\n the file we therefore take our encoded sequence and cut it into 8-bit\n chunks, each assigned to one byte, shrinking the size in memory by a factor of 8.\n Careful, though: the length of our encoding is not necessarily divisible\n by 8, in which case zeros are appended at the end, and they must not be\n forgotten when decoding. So as not to lose the number of appended zeros,\n a byte holding that count in binary is prepended.\n '''\n coded = self.encode(text)\n length = len(coded)\n nb_zeros_rajoute = 8 - length % 8\n binary_encoded = bytearray()\n for _ in range(nb_zeros_rajoute): # append the padding zeros to the string encoding\n coded += '0'\n # the byte carrying the number of appended zeros\n byte_avec_info = \"{0:08b}\".format(nb_zeros_rajoute)\n coded = byte_avec_info + coded\n\n for i in range(0, len(coded), 8): # walk the bit string one byte (8 bits) at a time\n current_byte = coded[i:i+8]\n # pack 8 bits into a single byte\n binary_encoded.append(int(current_byte, base=2))\n return binary_encoded\n\n def decode_bin(self, code: bytearray):\n '''\n Inverse of encode_bin: rebuild the bit string, read the first byte to\n learn how many padding zeros were appended, strip them, then decode.\n '''\n bits = ''\n for byte in code:\n bits += \"{0:08b}\".format(byte)\n nb_zeros = int(bits[:8], base=2)\n bits = bits[8:]\n if nb_zeros > 0:\n bits = bits[:-nb_zeros]\n return self.decode(bits)\n\n\npass\n","sub_path":"huffman/codec.py","file_name":"codec.py","file_ext":"py","file_size_in_byte":6276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"477802516","text":"import pygame as py\r\nimport sys ,setup\r\n\r\nscreen = py.display.set_mode((900,300))\r\npy.display.set_caption('STRIKER GAME')\r\ngame_clock = py.time.Clock()\r\npy.init()\r\n\r\ndef ball():\r\n global a\r\n global b\r\n global lp\r\n global rp\r\n x,y=setup.ball['center']\r\n i,j=setup.striker['striker1']['center']\r\n o,u=setup.striker['striker2']['center']\r\n if x==6:\r\n a=-a\r\n lp+=1\r\n if x==894:\r\n a=-a\r\n rp+=1\r\n if y==6 or y==294:\r\n b=-b\r\n if ((112==x or 88==x) and j-20<=y<=j+20) or ((788==x or 812==x) and u-20<=y<=u+20):\r\n a=-a\r\n if ((y==j-20 or y==j+20) and 88<=x<=112) or ((y==u-20 or y==u+20) and 788<=x<=812):\r\n b=-b\r\n x+=a\r\n y+=b\r\n setup.ball['center'] = (x,y)\r\n setup.ball['rect'].center = (x,y)\r\n\r\ndone=False\r\nuu=0\r\na=1\r\nb=1\r\nrp=0\r\nlp=0\r\ngreen = (0, 200, 0) \r\nfont = py.font.Font('freesansbold.ttf',25)\r\nwhile not done:\r\n ball()\r\n keypress=py.key.get_pressed()\r\n for event in py.event.get():\r\n if event.type == py.QUIT:\r\n sys.exit()\r\n done = True\r\n if keypress[py.K_w]:\r\n setup.translate(setup.striker['striker1'],1)\r\n if keypress[py.K_UP]:\r\n setup.translate(setup.striker['striker2'],1)\r\n if keypress[py.K_s]:\r\n setup.translate(setup.striker['striker1'],-1)\r\n if keypress[py.K_DOWN]:\r\n setup.translate(setup.striker['striker2'],-1)\r\n if rp<=5 and lp<=5:\r\n text = font.render('%2d :%3d'%(lp,rp), True, green) \r\n textRect = text.get_rect()\r\n textRect.center = (450,20)\r\n setup.draw(text, textRect)\r\n if rp==5:\r\n text = font.render('left guy won', True, green) \r\n textRect = text.get_rect()\r\n textRect.center = (450,150)\r\n screen.blit(text, textRect)\r\n py.display.update()\r\n count=0\r\n while count<1000:\r\n screen.blit(text, textRect)\r\n py.display.update()\r\n count+=1\r\n done=False\r\n sys.exit()\r\n elif lp==5:\r\n text = font.render('right guy won', True, green) \r\n textRect = text.get_rect()\r\n textRect.center = (450,150)\r\n screen.blit(text, textRect)\r\n py.display.update()\r\n count=0\r\n while count<1000:\r\n screen.blit(text, textRect)\r\n py.display.update()\r\n count+=1\r\n done=False\r\n sys.exit()\r\n \r\n\r\n \r\n game_clock.tick(100+uu)\r\n uu+=0.02\r\npy.quit()\r\n \r\n","sub_path":"pygame/striker_game/striker_game.py","file_name":"striker_game.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"379558460","text":"\"\"\"\n--- Part Two ---\nThe elves are also running low on ribbon. 
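A hedged end-to-end sketch of the Huffman pipeline from the codec record above; it assumes the TreeBuilder and Codec classes of that record are in scope, and the sample string is invented:

sample = "abracadabra"
builder = TreeBuilder(sample)       # classes from the codec record above
codec = Codec(builder.tree())       # Codec reads the root from the end of the tree list
bits = codec.encode(sample)         # '0'/'1' string, one prefix-free code per character
assert codec.decode(bits) == sample
packed = codec.encode_bin(sample)   # bytearray: 1 header byte plus the padded payload
assert codec.decode_bin(packed) == sample
print(len(sample), len(bits), len(packed))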
Ribbon is all the same width, so they only have to worry about the length they need to order, which they would again like to be exact.\n\nThe ribbon required to wrap a present is the shortest distance around its sides, or the smallest perimeter of any one face. Each present also requires a bow made out of ribbon as well; the feet of ribbon required for the perfect bow is equal to the cubic feet of volume of the present. Don't ask how they tie the bow, though; they'll never tell.\n\nFor example:\n\nA present with dimensions 2x3x4 requires 2+2+3+3 = 10 feet of ribbon to wrap the present plus 2*3*4 = 24 feet of ribbon for the bow, for a total of 34 feet.\nA present with dimensions 1x1x10 requires 1+1+1+1 = 4 feet of ribbon to wrap the present plus 1*1*10 = 10 feet of ribbon for the bow, for a total of 14 feet.\nHow many total feet of ribbon should they order?\n\"\"\"\n\nfile = open(\"input.txt\", 'r')\n\ntotal_ribbon = 0\n\nfor line in file:\n dimensions = line.split('x')\n l = int(dimensions[0])\n w = int(dimensions[1])\n h = int(dimensions[2].replace(\"\\n\", \"\"))\n a = l + l + w + w\n b = w + w + h + h\n c = h + h + l + l\n total_ribbon += l*w*h + min(min(a, b), c)\n\nfile.close()\n\nprint(total_ribbon)","sub_path":"day2/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"403684475","text":"# -*- coding: iso-8859-1 -*-\r\n\r\nimport time\r\nimport math\r\nimport random\r\nimport os\r\nrandom.seed()\r\n\r\nglobal verb_level\r\nverb_level = 0 ## Level of statements to be written to the console. Higher levels mean more output\r\n\r\n####################################################\r\n###\r\n### Basic functions for data input, output, and visualization\r\n###\r\n####################################################\r\n\r\n\r\ndef niceprint_tree(tdic, einr = 0, inherit = '', spc = ' ', trunc=0, lists=True):\r\n ## This function takes a variable in dictionary or list format and transforms the values\r\n ## to a readable format in one single string variable.\r\n if type(tdic)==dict:\r\n if inherit == '': inherit = '{\\n'\r\n for k in sorted(tdic.keys()):\r\n if type(tdic[k]) == dict:\r\n inherit = inherit + einr*spc + str(k) + ': ' + niceprint_tree(tdic[k],einr+1,inherit = '{\\n',trunc=trunc)\r\n elif type(tdic[k]) == list and lists:\r\n inherit = inherit + einr*spc + str(k) + ': ' + niceprint_tree(tdic[k],einr+1,inherit = '[\\n',trunc=trunc)\r\n else:\r\n value = tdic[k]\r\n if type(value)==str:\r\n value = \"'\"+value+\"'\"\r\n elif type(value) in [int,float]:\r\n value = str(value)\r\n elif type(value) == list:\r\n value = str(value)\r\n else:\r\n value = str(value)\r\n if len(value) > trunc and trunc > 0:\r\n tail = int(trunc/2)\r\n value = value[:tail] + '...'+value[-tail:] + ' ('+str(len(value))+' characters)' \r\n inherit = inherit + einr*spc + str(k) + ': '+ value + '\\n'\r\n inherit = inherit + einr*spc + '}\\n'\r\n elif type(tdic)==list:\r\n if inherit == '': inherit = '[\\n'\r\n for e in tdic:\r\n if type(e) == dict:\r\n inherit = inherit + einr*spc + niceprint_tree(e,einr+1,inherit = spc+'{\\n',trunc=trunc)\r\n elif type(e) == list and lists:\r\n inherit = inherit + einr*spc + niceprint_tree(e,einr+1,inherit = spc+'[\\n',trunc=trunc)\r\n else:\r\n value = e\r\n if type(value)==str:\r\n value = \"'\"+value+\"'\"\r\n elif type(value) in [int,float]:\r\n value = str(value)\r\n elif type(value) == list:\r\n value = str(value)\r\n else:\r\n value = 
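The ribbon computation above takes the minimum over three explicit perimeters; sorting the dimensions gives the same result directly, because the smallest face perimeter always uses the two shortest edges. A self-contained check against the puzzle's own examples:

def ribbon(l, w, h):
    a, b, _ = sorted((l, w, h))
    # 2*(a+b): perimeter of the smallest face; l*w*h: ribbon for the bow
    return 2 * (a + b) + l * w * h

assert ribbon(2, 3, 4) == 34
assert ribbon(1, 1, 10) == 14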
str(value)\r\n if len(value) > trunc and trunc > 0:\r\n tail = int(trunc/2)\r\n value = value[:tail] + '...'+value[-tail:] + ' ('+str(len(value))+' characters)' \r\n inherit = inherit + einr*spc + value + ',\\n'\r\n inherit = inherit + einr*spc + ']\\n' \r\n return inherit\r\n\r\ndef get_data(fname):\r\n ## Read a table from a tab-spaced text document\r\n infile = open(fname,'r')\r\n zeilen = infile.readlines()\r\n infile.close()\r\n header = zeilen[0][:-1]\r\n daten = {}\r\n variablen = header.split('\\t')\r\n for v in variablen:\r\n daten[v] = []\r\n for z in zeilen[1:]:\r\n datenzeile = z[:-1].split('\\t')\r\n if len(datenzeile) == len(variablen):\r\n for i in range(0,len(variablen)):\r\n daten[variablen[i]].append(datenzeile[i])\r\n else:\r\n print('Ungültige Zeile: \"'+z+'\"')\r\n return [daten,variablen]\r\n\r\ndef write_data(daten,fname):\r\n ## Write a table to a tab-spaced text document\r\n outfile = open(fname,'w')\r\n variablen = daten[1]\r\n ddic = daten[0]\r\n header = '\\t'.join(variablen)\r\n outfile.write(header+'\\n')\r\n for zeile in range(0,len(ddic[variablen[0]])):\r\n datenzeile = []\r\n for v in variablen:\r\n datenzeile.append(ddic[v][zeile])\r\n outfile.write('\\t'.join(datenzeile)+'\\n')\r\n outfile.close()\r\n\r\ndef write_sample(infile,outfile,size=100):\r\n ## Draw a sample with repetition from a data file and store the result\r\n source = open(infile,'r')\r\n inlines = source.readlines()\r\n source.close()\r\n target = open(outfile,'w')\r\n target.write(inlines[0])\r\n maxline = len(inlines)-1\r\n for b in range(size):\r\n sel = random.randint(1,maxline)\r\n target.write(inlines[sel])\r\n target.close()\r\n\r\n\r\ndef write_samples(infile,train_outfile,test_outfile,size=100):\r\n ## Draw a sample with repetition from a data file and store the result\r\n source = open(infile,'r')\r\n inlines = source.readlines()\r\n source.close()\r\n target1 = open(train_outfile,'w') ## Open Training Data\r\n target1.write(inlines[0])\r\n target2 = open(test_outfile,'w') ## Open Test Data\r\n target2.write(inlines[0])\r\n\r\n cases = range(1,len(inlines))\r\n\r\n testcases = random.sample(cases,size)\r\n\r\n for i in cases:\r\n if i in testcases:\r\n target1.write(inlines[i])\r\n else:\r\n target2.write(inlines[i])\r\n \r\n target1.close()\r\n target2.close()\r\n \r\n\r\ndef get_unique(liste):\r\n ## Return unique values from a list\r\n td = {}\r\n for element in liste:\r\n td[element] = 0\r\n return sorted(td.keys())\r\n\r\ndef verb(zeile,nl=1,verbose=0):\r\n ## Print statements\r\n global verb_level\r\n if verb_level >= verbose:\r\n if nl == 0:\r\n print(zeile),\r\n else:\r\n print(zeile)\r\n\r\n\r\ndef c_mittel(liste,ln=0):\r\n ##Calculate the mean value of a numeric list\r\n anz = 0\r\n summe = 0.0\r\n for e in liste:\r\n try:\r\n if ln == 1:\r\n summe = summe + math.log(float(e))\r\n else:\r\n summe = summe + float(e)\r\n anz = anz + 1\r\n except:\r\n summe = summe\r\n\r\n if anz > 0:\r\n mittel = summe/anz\r\n else:\r\n mittel = 0\r\n return mittel\r\n\r\ndef c_sdev(liste,ln=0):\r\n #Calculate the standard deviation of a numeric list\r\n m = c_mittel(liste,ln)\r\n summe = 0.0\r\n anz = 0\r\n if not m == 0:\r\n for e in liste:\r\n try:\r\n if ln == 1:\r\n summe = summe + (math.log(e)-m)**2\r\n else:\r\n summe = summe + (e-m)**2\r\n anz = anz + 1\r\n except:\r\n summe = summe\r\n\r\n if anz > 1:\r\n sd = (summe/(anz-1))**.5\r\n else:\r\n sd = 0\r\n else:\r\n sd = 0\r\n\r\n return sd\r\n\r\ndef c_low(liste,alpha=0.05):\r\n #Calculate the lower bound of the 1-alpha CI\r\n 
tail = int(len(liste)*alpha)\r\n vlist = []\r\n for v in liste:\r\n try:\r\n vlist.append(float(v))\r\n except:\r\n a = 1\r\n outval = sorted(vlist)[tail]\r\n return outval\r\n\r\ndef c_hig(liste,alpha=0.05):\r\n #Calculate the lower bound of the 1-alpha CI\r\n tail = int(len(liste)*alpha)\r\n if tail == 0: tail = 1\r\n vlist = []\r\n for v in liste:\r\n try:\r\n vlist.append(float(v))\r\n except:\r\n a = 1\r\n outval = sorted(vlist)[-tail]\r\n return outval\r\n\r\n\r\n####################################################\r\n###\r\n### Evolutionary algorithm\r\n###\r\n####################################################\r\n\r\n\r\ndef init_settings(fname,manualset,force=0):\r\n ## Initialize the basic settings for the evolutionary algorithm.\r\n ## The settings are written to a file (fname). If the file\r\n ## already exists and force is set to 1, the settings from\r\n ## the file are taken instead of the manually set values.\r\n ## This function may be used to change the boundary conditions\r\n ## of the evolutionary algorithm in mid-execution.\r\n \r\n global g_settings\r\n if force == 0:\r\n try:\r\n gset = open(fname,'r')\r\n gs = gset.readline()\r\n gset.close()\r\n while not gs[-1]=='}':\r\n gs = gs[:-1]\r\n gs = eval(gs)\r\n print('Settings loaded: '+str([gs]))\r\n except:\r\n print('There are no previously stored settings. Setting current.')\r\n gset = open(fname,'w')\r\n gset.write(str(manualset))\r\n gset.write('\\n')\r\n gset.close()\r\n gs = manualset\r\n else:\r\n gset = open(fname,'w')\r\n gset.write(str(manualset))\r\n gset.write('\\n')\r\n gset.close()\r\n gs = manualset \r\n g_settings = gs\r\n\r\ndef evol_manhattan(prior,mutation=0):\r\n ## Use the prior distributions (m and sd) to compute a new value\r\n ## If mutation is set to 1, the standard deviation is doubled, allowing\r\n ## for parameters outside the usual range.\r\n \r\n v = prior['Vert']\r\n mut = random.random()\r\n m = prior['M']\r\n if mut < mutation: ## In case of mutation events, increase the variance\r\n sd = prior['SD']*2\r\n else:\r\n sd = prior['SD']\r\n \r\n if v == 'norm':\r\n a = random.normalvariate(m,sd)\r\n elif v == 'normp':\r\n a = random.normalvariate(m,sd)\r\n if a < 0: a = 0 ## Force positive numbers. 
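c_low and c_hig above estimate confidence bounds as order statistics: sort the cleaned values and step int(n*alpha) elements into the tail. A self-contained illustration of the indexing rule; note that for n*alpha < 1 the lower bound degenerates to the minimum:

def tail_bound(values, alpha=0.05):
    # Same indexing rule as c_low above, on values already cast to float
    tail = int(len(values) * alpha)
    return sorted(values)[tail]

data = list(range(100))        # 0 .. 99
print(tail_bound(data))        # 5: the int(100*0.05)-th order statistic
print(tail_bound(data, 0.2))   # 20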
\r\n elif v == 'enorm':\r\n a = math.exp(random.normalvariate(m,sd))\r\n elif v == 'const':\r\n a = m\r\n return a\r\n\r\ndef evol_initialize(ps,anz=100):\r\n ## Initialize the prior dictionary for the evolutionary algorithm\r\n ## Each parameter is a key in this dictionary and holds previous\r\n ## and current values, as well as distribution information\r\n parlist = ps.keys()\r\n prior = {}\r\n for p in parlist:\r\n prior[p] = {}\r\n prior[p]['M'] = ps[p][0]\r\n prior[p]['SD'] = ps[p][1]\r\n prior[p]['Vert']=ps[p][2]\r\n prior[p]['Prev']=[]\r\n prior[p]['Curr']=[]\r\n prior[p]['Score']=0\r\n for i in range(anz):\r\n prior[p]['Curr'].append(evol_manhattan(prior[p]))\r\n return prior\r\n\r\ndef evol_write_result(prior,fname='results.txt'):\r\n ## Write the result of one generation of parameter sets to an external file.\r\n try:\r\n u1 = open(fname,'r')\r\n u1.close()\r\n except:\r\n u1 = open(fname,'w')\r\n for p in sorted(prior.keys()):\r\n if not p == 'Results':\r\n u1.write(p+'\\t')\r\n u1.write('Result\\n')\r\n u1.close()\r\n outfile = open(fname,'a')\r\n for i in range(len(prior['Results'])):\r\n for p in sorted(prior.keys()):\r\n if not p == 'Results':\r\n outfile.write(str(prior[p]['Curr'][i])+'\\t')\r\n outfile.write(str(prior['Results'][i])+'\\n')\r\n outfile.close()\r\n \r\n\r\ndef evol_update(prior,special_ind,gen_size=20,length=100,mutation=0.1,fname='upd.txt'):\r\n ## Eliminate parameter sets with low results an generate new sets.\r\n ## At the end, an updated version of the prior dictionary is returned.\r\n ## A detailed report for each parameter set is written to an external file for inspection\r\n global g_settings\r\n try:\r\n u1 = open(fname,'r')\r\n u1.close()\r\n except:\r\n u1 = open(fname,'w')\r\n u1.write('Timestamp\\tM_Result\\tSD_Result\\tLow_Result\\tHig_Result\\t')\r\n for p in sorted(prior.keys()):\r\n if not p == 'Results':\r\n u1.write('M_'+p+'\\tSD_'+p+'\\tLow_'+p+'\\tHig_'+p+'\\t')\r\n u1.write('\\n')\r\n u1.close()\r\n \r\n upd = open(fname,'a')\r\n while -1000 in prior['Results']:\r\n prior['Results'].remove(-1000) ###Remove failed simulations\r\n rm = c_mittel(prior['Results'])\r\n rs = c_sdev(prior['Results'])\r\n rl = c_low(prior['Results'])\r\n rh = c_hig(prior['Results'])\r\n upd.write(time.ctime()+'\\t')\r\n upd.write(str(rm)+'\\t'+str(rs)+'\\t'+str(rl)+'\\t'+str(rh)+'\\t') ##Write the result of the simulation before cleaning up the gene pool\r\n del prior['Results'] ##Remove results from the prior distribution for next run.\r\n for p in sorted(prior.keys()):\r\n keepers = []\r\n for i in range(len(prior[p]['Curr'])):\r\n if not i in special_ind[0]:\r\n prior[p]['Prev'].append(prior[p]['Curr'][i]) ##Kill losers, retain others as previous candidates\r\n if i in special_ind[1]:\r\n keepers.append(prior[p]['Curr'][i])\r\n \r\n if len(prior[p]['Prev'])>length:\r\n prior[p]['Prev'] = prior[p]['Prev'][-length:]\r\n if prior[p]['Vert'] in ['norm','pnorm','normp']:\r\n prior[p]['M']=c_mittel(prior[p]['Prev'])\r\n prior[p]['SD']=c_sdev(prior[p]['Prev'])\r\n prior[p]['Low']=c_low(prior[p]['Prev'])\r\n prior[p]['Hig']=c_hig(prior[p]['Prev'])\r\n elif prior[p]['Vert'] in ['const']:\r\n prior[p]['M']=prior[p]['M']\r\n prior[p]['SD']=0\r\n prior[p]['Low']=c_low(prior[p]['Prev'])\r\n prior[p]['Hig']=c_hig(prior[p]['Prev']) \r\n else:\r\n prior[p]['M']=c_mittel(prior[p]['Prev'],ln=1)\r\n prior[p]['SD']=c_sdev(prior[p]['Prev'],ln=1) \r\n prior[p]['Low']=c_low(prior[p]['Prev'])\r\n prior[p]['Hig']=c_hig(prior[p]['Prev'])\r\n\r\n if 'Overwrite' in 
g_settings.keys():\r\n ## If the settings include an Overwrite-dictionary, the means and standard deviations\r\n ## for specified parameters will be overwritten. Using this hack, parameters may be\r\n ## Changed in mid-execution. You may treat this option as a \"God of Evolution\" protocol.\r\n if p in g_settings['Overwrite'].keys():\r\n print('Overwriting parameter \"'+p+'\". Old settings: M='+str(prior[p]['M'])+'; SD='+str(prior[p]['SD']))\r\n if 'M' in g_settings['Overwrite'][p].keys(): prior[p]['M'] = g_settings['Overwrite'][p]['M']\r\n if 'SD' in g_settings['Overwrite'][p].keys(): prior[p]['SD'] = g_settings['Overwrite'][p]['SD']\r\n print(' --> New settings: M='+str(prior[p]['M'])+'; SD='+str(prior[p]['SD']))\r\n\r\n prior[p]['Curr'] = keepers\r\n for i in range(len(keepers),gen_size):\r\n prior[p]['Curr'].append(evol_manhattan(prior[p],mutation)) \r\n upd.write(str(prior[p]['M'])+'\\t'+str(prior[p]['SD'])+'\\t'+str(prior[p]['Low'])+'\\t'+str(prior[p]['Hig'])+'\\t')\r\n upd.write('\\n')\r\n upd.close()\r\n return prior\r\n\r\ndef evol_fail(rlist,share,legshare=0.0):\r\n ## Identify the failing candidates and return their IDs\r\n anz = int(len(rlist)*share)\r\n anzwin = int(len(rlist)*legshare)\r\n if anzwin==0:anzwin=1\r\n if anz + anzwin > len(rlist)-1:\r\n anzwin = len(rlist)-1-anz\r\n minscore = sorted(rlist)[anz]\r\n maxscore = sorted(rlist)[-anzwin]\r\n rl = []\r\n wl = []\r\n for i in range(len(rlist)):\r\n if rlist[i] < minscore or rlist[i]==-1000:\r\n rl.append(i)\r\n if rlist[i] >= maxscore:\r\n wl.append(i)\r\n print('win:',i,rlist[i])\r\n return [rl,wl]\r\n\r\n\r\n####################################################\r\n###\r\n### Simulation\r\n###\r\n####################################################\r\n\r\n## Basic functions to be used in the simulation\r\n\r\ndef vectorize(pdic,var):\r\n ## Return a vector of values from all agents\r\n outlist = []\r\n for p in sorted(pdic.keys()):\r\n outlist.append(pdic[p][var])\r\n return outlist\r\n\r\ndef calc_pearson(pdic,v1,v2):\r\n ## Compute the pearson correlation between two variables for all agents\r\n sv1 = 0.0\r\n sv2 = 0.0\r\n sdv1 = 0.0\r\n sdv2 = 0.0\r\n cov = 0.0\r\n anz = 0\r\n for p in pdic.keys():\r\n if type(pdic[p][v1]) == float and type(pdic[p][v2]) == float:\r\n sv1 = sv1 + pdic[p][v1]\r\n sv2 = sv2 + pdic[p][v2]\r\n anz = anz + 1\r\n mv1 = sv1 / anz\r\n mv2 = sv2 / anz\r\n for p in pdic.keys():\r\n if type(pdic[p][v1]) == float and type(pdic[p][v2]) == float:\r\n sdv1 = sdv1 + (pdic[p][v1]-mv1)**2\r\n sdv2 = sdv2 + (pdic[p][v2]-mv2)**2\r\n cov = cov + (pdic[p][v1]-mv1)*(pdic[p][v2]-mv2)\r\n sdv1 = (sdv1/anz)**.5\r\n sdv2 = (sdv2/anz)**.5\r\n cov = (cov/anz)\r\n if sdv1*sdv2 > 0:\r\n pcorr = cov/(sdv1*sdv2)\r\n else:\r\n pcorr = '-'\r\n\r\n return pcorr\r\n\r\ndef calc_identity(pdic,v1,v2):\r\n ## Compute the inverted mean square deviation for two variables for all agents.\r\n sqsum = 0.0\r\n anz = 0\r\n for p in pdic.keys():\r\n val1 = pdic[p][v1]\r\n val2 = pdic[p][v2]\r\n if type(val1) == float and type(val2) == float:\r\n sqsum = sqsum + (val1-val2)**2\r\n anz = anz + 1\r\n if anz > 0:\r\n sqmean = sqsum/anz\r\n else:\r\n sqmean = 1000\r\n\r\n return 1.0/sqmean\r\n\r\n\r\n## Initialization of the model\r\n\r\ndef initialize(pfile,mfile,attvar,verbose=1):\r\n ## Load the data from tables and pass it to initialize agents (pdic) and media environment (mdic)\r\n if verbose == 1:\r\n verb('Loading all Data...')\r\n pdata = get_data(pfile)[0]\r\n mdata = get_data(mfile)[0]\r\n\r\n if verbose ==1:\r\n 
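## get_data(pfile)[0] yields the parsed table as a {column_name: [values]}\r\n        ## dictionary; pdata holds the survey columns and mdata the media-content\r\n        ## columns consumed by create_pdic and create_mdic below.\r\n        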
verb('Variables in Individual Data: '+str(sorted(pdata.keys())))\r\n        verb('Variables in Media Data: '+str(sorted(mdata.keys())))\r\n        verb('\\nCreating dictionaries')\r\n    mdic = create_mdic(mdata)\r\n    pdic = create_pdic(pdata,mdic.keys(),attvar,verbose=verbose)\r\n    if verbose == 1:\r\n        verb('Media identified: '+str(mdic.keys()))\r\n        verb(str(len(pdic.keys()))+' Cases identified')\r\n    return (pdic,mdic)\r\n\r\n\r\ndef create_mdic(data):\r\n    ## Initiate the media dictionary which serves as environment\r\n    timerange = get_unique(data['Week_Excel'])\r\n    for i in range(len(timerange)):\r\n        timerange[i] = int(timerange[i])\r\n    mindate = int(timerange[0])\r\n    maxdate = int(timerange[-1])\r\n    arguments = ['Arg_Dicho_1','Arg_Dicho_2','Arg_Dicho_9']\r\n    mdic = {}\r\n    for m in get_unique(data['Medium']):\r\n        mdic[m] = {}\r\n        for d in timerange:\r\n            mdic[m][d] = {}\r\n            for a in arguments:\r\n                mdic[m][d][a] = 0\r\n            mdic[m][d]['Bias']= 0\r\n\r\n    for i in range(len(data['Medium'])):\r\n        try:\r\n            d = int(data['Week_Excel'][i])\r\n        except ValueError:\r\n            d = maxdate\r\n        m = data['Medium'][i]        \r\n        for arg in arguments:\r\n            try:\r\n                a = float(data[arg][i])\r\n            except ValueError:\r\n                a = 0\r\n            mdic[m][d][arg]=a\r\n        mdic[m][d]['Bias']=(mdic[m][d]['Arg_Dicho_1']-mdic[m][d]['Arg_Dicho_2'])/(mdic[m][d]['Arg_Dicho_1']+mdic[m][d]['Arg_Dicho_2']+mdic[m][d]['Arg_Dicho_9'])\r\n\r\n    #verb(niceprint_tree(mdic))\r\n    return mdic\r\n\r\n\r\ndef create_pdic(data,media,attvar=\"Einstellung_Pro1\", verbose = 1):\r\n    ##Create the population dictionary which holds all agents\r\n    invalid_cases = 0\r\n    valid_cases = 0\r\n    persvar = ['intnum','sex','age','Zeitung_W1','TV_W1','Zeitung_W2','TV_W2','Zeitung_W3','TV_W3',\r\n               'Einstellung_W1','Einstellung_W2','Einstellung_W3','Einstellung_Pro1','Einstellung_Pro2','Einstellung_Pro3',\r\n               'Cert1','Cert2','Cert3','Media_Reliance','Disk_Reliance','dim_bildung','dim_x','dim_y','dim_lr','dim_spr']\r\n    medvar = ['TV_W1', 'TV_W2', 'TV_W3', 'Zeitung_W1',\r\n              'Zeitung_W2', 'Zeitung_W3']\r\n    \r\n    pdic = {}\r\n    for i in range(len(data['intnum'])):\r\n        valid = 1\r\n        ident = data['intnum'][i]\r\n        try:\r\n            v = float(data[attvar][i]) ##Drop agents whose attitude is unknown\r\n        except ValueError:\r\n            valid = 0\r\n        if valid == 1:\r\n            valid_cases = valid_cases + 1\r\n            pdic[ident] = {}\r\n            for p in persvar:\r\n                try:\r\n                    pdic[ident][p] = float(data[p][i])\r\n                except ValueError:\r\n                    pdic[ident][p] = 'NA'\r\n                \r\n            pdic[ident]['Einst'] = pdic[ident][attvar] #Initialize the dynamic attitude\r\n            if type(pdic[ident]['Einst']) == str:\r\n                pdic[ident]['Einst'] = 0.0\r\n            pdic[ident]['Media'] = []\r\n            for m in medvar:\r\n                if data[m][i] in media:\r\n                    pdic[ident]['Media'].append(data[m][i])\r\n            pdic[ident]['Media'] = get_unique(pdic[ident]['Media'])\r\n        else:\r\n            invalid_cases = invalid_cases + 1\r\n    if verbose == 1:\r\n        verb(str(invalid_cases)+' Invalid cases identified')\r\n        verb(str(valid_cases)+' Agents in Simulation')\r\n    \r\n    return pdic\r\n\r\n\r\n## Computation of impacts\r\n\r\ndef calc_simpact(pdic,p1,param):\r\n    ## Compute the social impact on one agent (p1)\r\n    pl = list(pdic.keys())\r\n    pl.remove(p1)\r\n    bias = 0.0\r\n    anz = 1.0\r\n    \r\n    for p2 in pl:\r\n        if pdic[p1]['dim_spr'] == pdic[p2]['dim_spr']:\r\n            dist_ort = (pdic[p1]['dim_x']-pdic[p2]['dim_x'])**2 + (pdic[p1]['dim_y']-pdic[p2]['dim_y'])**2\r\n            try:\r\n                dist_lr = (pdic[p1]['dim_lr']-pdic[p2]['dim_lr'])**2\r\n            except TypeError: ## 'NA' entries have no numeric distance\r\n                dist_lr = 0.5\r\n            distance = dist_ort*param['coeff_d']**2+dist_lr\r\n            if distance < 1:\r\n                distance = 1\r\n            \r\n            bias = bias + 
(pdic[p2]['Einst']-pdic[p1]['Einst'])/distance\r\n anz = anz + 1/distance\r\n \r\n try:\r\n impact = bias / anz\r\n except:\r\n impact = 0.0\r\n verb('ERROR: Div by 0 in SI_impact (imp_pers, anz_pers, imp_supp, anz_supp): '+str(imp_pers)+'; '+str(anz_pers)+'; '+str(imp_supp)+'; '+str(anz_supp)) \r\n return impact\r\n\r\ndef calc_mimpact(mdic,medien,tag,param):\r\n ## Compute the media impact, using a list of media (medien)\r\n mimpact = 0.0\r\n anz = 0\r\n for m in medien:\r\n mimpact = mimpact + mdic[m][tag]['Bias']\r\n anz = anz + 1\r\n if anz > 1:\r\n mimpact = mimpact / anz\r\n return mimpact\r\n\r\n\r\ndef simulate(pdic, mdic, steps, param, benchvar=\"Einstellung_Pro2\"):\r\n ## Actual simulation, using the population, media environment, points in time, and parameters\r\n results = []\r\n ts = time.time()\r\n outsim = {}\r\n outsimvar=['Initial']\r\n outsim['Initial']=vectorize(pdic,'Einst')\r\n b_cert = param['coeff_e'] ## Use the coefficients directly\r\n b_reli = param['coeff_a']\r\n\r\n for d in steps:\r\n for p in pdic.keys():\r\n MI = calc_mimpact(mdic,pdic[p]['Media'],d,param)\r\n SI = calc_simpact(pdic,p,param)\r\n try:\r\n Msusc = param['coeff_b']+b_cert*pdic[p]['Cert1']+b_reli*pdic[p]['Media_Reliance']\r\n except:\r\n Msusc = 0.0\r\n\r\n try:\r\n Ssusc = param['coeff_c']+b_cert*pdic[p]['Cert1']\r\n except:\r\n Ssusc = 0.0\r\n \r\n pdic[p]['dEinst']=MI*Msusc + SI*Ssusc\r\n\r\n for p in pdic.keys():\r\n pdic[p]['Einst'] = pdic[p]['Einst']+pdic[p]['dEinst']\r\n r = calc_identity(pdic,'Einst',benchvar)\r\n verb('Mean square sum on day: '+str(d)+' = '+str(1.0/r),verbose=3)\r\n\r\n verb('.',nl=0,verbose=2)\r\n results.append(r)\r\n outsim[str(d)]=vectorize(pdic,'Einst')\r\n outsimvar.append(str(d))\r\n \r\n pearson_r = calc_pearson(pdic,'Einst',benchvar)\r\n\r\n #write_data(outsim,outsimvar,'tmp_outsim.txt') ## Write a temporary result of the simulation\r\n\r\n ts = time.time()-ts\r\n verb(' Simulation finished in: '+str(ts)+' Seconds',verbose=1)\r\n tr = open('tmp_w1_bs.txt','a')\r\n for p in sorted(param.keys()):\r\n tr.write(str(param[p])+'\\t')\r\n for r in results:\r\n tr.write(str(r)+'\\t')\r\n tr.write('\\n')\r\n tr.close()\r\n return [results[-1],pearson_r]\r\n\r\n\r\n\r\n####################################################\r\n###\r\n### Main program\r\n###\r\n####################################################\r\n\r\n## Set the basic settings for the evolutionary algorithm and bootstrapping\r\n\r\nglobal g_settings\r\ng_settings = {}\r\nmanualset = {'Gen_Size':30,\r\n 'Max_Gen':1,\r\n 'Max_Memory':60,\r\n 'Selection':.5,\r\n 'Legends':.1,\r\n 'Mutation': .2,\r\n 'N_Bootstrap':2000,\r\n 'S_Bootstrap':600}\r\ninit_settings('G_Settings.json',manualset,force=1)\r\n\r\n## If, at any later time during the evolution of parameter sets, you wish to change\r\n## one of these values, you may edit them in the 'G_Settings.json' file.\r\n## If the distribution of a coefficient is to be changed, use the parameter: 'Overwrite':{'coeff_e':{'M':1.5, 'SD':0.01}}\r\n## (In this case to overwrite the distribution of coeff_e with a mean of 1.5 and a standard deviation of 0.01\r\n\r\n\r\n##Set prior distributions (m and sd) for each parameter\r\n##pset = {'coeff_a':(0.0,.1,'norm'), ##Effect of Media Reliance\r\n## 'coeff_b':(0.0,.1,'norm'), ##Intercept: Media Impact\r\n## 'coeff_c':(0.0,.1,'norm'), ##Intercept: Social Impact\r\n## 'coeff_d':(2.0,.1,'normp'), ##Ratio of Spatial vs. 
Ideological distance\r\n##    'coeff_e':(0.0,.1,'norm')} ## Effect of Attitude certainty\r\n\r\npset = {'coeff_a':(0,.01,'norm'), ##Effect of Media Reliance\r\n        'coeff_b':(0,.01,'norm'), ##Intercept: Media Impact\r\n        'coeff_c':(0,.01,'norm'), ##Intercept: Social Impact\r\n        'coeff_d':(1.0,.03,'normp'), ##Ratio of Spatial vs. Ideological distance\r\n        'coeff_e':(0,.01,'norm')} ## Effect of Attitude certainty\r\n\r\nprior = evol_initialize(pset,g_settings['Gen_Size']) ## Initialize the prior distribution of parameter sets.\r\n\r\n\r\n## Set the boundary conditions for the simulation\r\nphase = 1\r\nif phase == 1: ## Distinguish between predicting the interval between wave 1&2 and wave 2&3\r\n    simdates=[38927,38934,38941,38948,38955,38962] #Dates for Phase 1\r\n    var_e1 = 'Einstellung_Pro1'\r\n    var_e2 = 'Einstellung_Pro2'\r\nelif phase == 2:\r\n    simdates=[38962,38969,38976,38983] #Dates for Phase 2\r\n    var_e1 = 'Einstellung_Pro2'\r\n    var_e2 = 'Einstellung_Pro3'\r\n\r\n## Set the names of the report files \r\noutfile1 = \"SimResult_within_full.txt\" ## Complete collection of simulation results for each parameter set\r\noutfile2 = \"SimResult_within.txt\" ## Summary per generation, providing M, SD and 95% CI for each parameter\r\nsummary_file = 'Summary_within.txt' ## Summary after maximal number of generations, including cross-validation with test data\r\n\r\nres_outfile = 1 ## 1: start a fresh tmp_w1_bs.txt report for this run, 0: append to the previous output\r\nres_pset = 0 ## Setting to reset the prior distribution after each bootstrapping sample: 1: reset / 0: use the posterior from last sample\r\n\r\nbootstep = 0\r\nwhile bootstep < g_settings['N_Bootstrap']:\r\n    bootstep = bootstep + 1\r\n    ##Bootstrapping loop\r\n    write_samples('Survey_Data.dat','train_sample.dat','test_sample.dat',g_settings['S_Bootstrap'])\r\n    if res_outfile == 1:\r\n        tr = open('tmp_w1_bs.txt','w')\r\n        for p in sorted(pset.keys()):\r\n            tr.write(str(p)+'\\t')\r\n        for s in simdates:\r\n            tr.write('Day_'+str(s)+'\\t')\r\n        tr.write('\\n')\r\n        tr.close()\r\n\r\n    if res_pset == 1:\r\n        prior = evol_initialize(pset,g_settings['Gen_Size'])\r\n\r\n    generation = 0\r\n    while generation < g_settings['Max_Gen']:\r\n        ## Loop of the evolutionary algorithm\r\n        \r\n        verb('\\n\\n####################\\n##### Generation: '+str(generation+1)+' of '+str(g_settings['Max_Gen'])+'\\n####################')\r\n        prior['Results'] = []\r\n        init_settings('G_Settings.json',g_settings) ##Re-load the settings from file, so that values edited in mid-run take effect.\r\n        dr2_list = [] ## Collect delta-R2 across all parameter sets of this generation\r\n        for i in range(g_settings['Gen_Size']): ## Repeat the simulation for each parameter set\r\n            try:\r\n                param = {}\r\n                for p in pset.keys():\r\n                    param[p] = prior[p]['Curr'][i] ## Set the parameters to the parameter set to be used\r\n\r\n                dics = initialize('train_sample.dat','Content_Data.dat',var_e1,verbose=0) ## Initialize the simulation\r\n                pcorr_init_r = calc_pearson(dics[0],var_e1,var_e2) ## Get the initial correlation of attitudes\r\n                pcorr_init_id = calc_identity(dics[0],var_e1,var_e2) ## Get the initial agreement of attitudes\r\n                verb('\\nInitial inverted squaresum: '+str(pcorr_init_id),verbose=1)\r\n                verb('\\nParameters: '+str(param),verbose=1)\r\n                simresult = simulate(dics[0],dics[1],simdates,param,var_e2) ## Call the simulation and return the final agreement\r\n                prior['Results'].append(simresult[0]) ## Notify the evolutionary algorithm of the result of this simulation\r\n            except Exception as fehler:\r\n                ##Emergency Error catch. 
The computation will not end because of strange errors. It just notifies the user.\r\n ##One reason for this error may be that the number of parameter sets per generation 'Gen_Size' was changed\r\n ##when a prior with a different generation size was already initialized. Will resolve itself on its own\r\n ##at the end of this generation when new parameter sets are initialized.\r\n prior['Results'].append(-1000)\r\n verb(str(fehler),0)\r\n verb('ERROR: Very strange error. Could not simulate individual. Dropping it with result = -1000')\r\n\r\n## inex = pcorr_init_r**2\r\n## inex2 = simresult[1]**2\r\n## dr2 = (simresult[1]**2-pcorr_init_r**2)*100\r\n inex = 1/pcorr_init_id ## Mean square deviation between the waves\r\n exp = 1/simresult[0] ## Mean square deviation after simulation\r\n dr2 = (inex-exp)/inex*100 ## Proportional reduction of error: Percent of explained variance by simulation\r\n dr2_list.append(dr2)\r\n \r\n verb('delta R2 (explained variance by simulation): '+\"{0:2.2f}%\".format(dr2),verbose=1)\r\n \r\n if pcorr_init_id < prior['Results'][-1]:\r\n verb('Result for Individual['+str(i)+']: '+str(i+1)+' from '+str(g_settings['Gen_Size'])+' (Gen: '+str(generation+1)+'/'+str(g_settings['Max_Gen'])+'): '+str(prior['Results'][-1])+'**',verbose=0)\r\n verb(niceprint_tree(param),verbose=1)\r\n else:\r\n verb('Result for Individual['+str(i)+']: '+str(i+1)+' from '+str(g_settings['Gen_Size'])+' (Gen: '+str(generation+1)+'/'+str(g_settings['Max_Gen'])+'): '+str(prior['Results'][-1]),verbose=0)\r\n verb(niceprint_tree(param),verbose=1)\r\n\r\n ## Inform on final result for this generation of the evolutionary algorithm\r\n \r\n verb('\\nInitial agreement was: '+str(pcorr_init_id),verbose=0)\r\n special_ind = evol_fail(prior['Results'],g_settings['Selection'],g_settings['Legends'])\r\n rem_individuals = special_ind[0]\r\n win_individuals = special_ind[1]\r\n verb('Removing individuals from herd: '+str(rem_individuals), nl=0,verbose=0)\r\n verb('Kepping individuals for breeding: '+str(win_individuals), nl=0,verbose=0)\r\n verb('\\nLowest result: '+str(min(prior['Results'])),verbose=0)\r\n\r\n resline = time.ctime()+'\\t'+str(pcorr_init_id)+'\\t'+str(c_mittel(prior['Results']))+'\\t'+str(c_sdev(prior['Results']))+'\\t'+str(c_mittel(dr2_list))\r\n evol_write_result(prior,outfile1)\r\n\r\n prior = evol_update(prior,special_ind,g_settings['Gen_Size'],g_settings['Max_Memory'],g_settings['Mutation'],outfile2)\r\n generation = generation + 1\r\n\r\n ## End of the evolutionary loop after the specified number of generations.\r\n ## The result is a dictionary only containing the optimal fits (prior).\r\n\r\n\r\n ## Cross-Validate the solution with the test-cases:\r\n\r\n verb('\\nEvaluating optimal parameters in test dataset..',verbose=0)\r\n \r\n mean_params = {}\r\n for p in pset.keys():\r\n param[p] = c_mittel(prior[p]['Prev']) ## Set parameters to the mean of the optimal solutions\r\n\r\n dics = initialize('test_sample.dat','Content_Data.dat',var_e1,verbose=0)\r\n pcorr_init_id = calc_identity(dics[0],var_e1,var_e2)\r\n simresult = simulate(dics[0],dics[1],simdates,param,var_e2)\r\n \r\n inex = 1/pcorr_init_id ## Mean square deviation between the waves\r\n exp = 1/simresult[0] ## Mean square deviation after simulation\r\n dr2 = (inex-exp)/inex*100 ## Proportional reduction of error: Percent of explained variance by simulation\r\n print(inex,exp,dr2)\r\n \r\n resline = resline+'\\t'+str(dr2)\r\n verb('\\n\\n >> Delta R2 (explained variance by simulation of test data): 
'+\"{0:2.2f}%\".format(dr2),verbose=0)\r\n verb('(Mean Delta R2 in training: '+\"{0:2.2f}%)\\n\\n\".format(c_mittel(dr2_list)),verbose=0)\r\n\r\n\r\n ## Output of Results\r\n\r\n if bootstep == 1:\r\n fullresult = open(summary_file,'w')\r\n fullresult.write('TS\\tInitial_Agreement\\tFinal_Agreement\\tSD_Final_Agreement\\tDRSQ_Training\\tDRSQ_Test')\r\n for p in sorted(prior.keys()):\r\n fullresult.write('\\tM_'+p+'\\tSD_'+p+'\\tLow_'+p+'\\tHigh_'+p)\r\n fullresult.write('\\n')\r\n fullresult.close()\r\n\r\n fullresult = open(summary_file,'a')\r\n fullresult.write(resline)\r\n\r\n for p in sorted(prior.keys()):\r\n m=c_mittel(prior[p]['Prev'])\r\n sd=c_sdev(prior[p]['Prev'])\r\n low=c_low(prior[p]['Prev'])\r\n hig=c_hig(prior[p]['Prev'])\r\n verb(p+': M='+str(m)+' ; SD='+str(sd),verbose=0)\r\n fullresult.write('\\t'+str(m)+'\\t'+str(sd)+'\\t'+str(low)+'\\t'+str(hig))\r\n\r\n fullresult.write('\\n')\r\n fullresult.close()\r\n time.sleep(5)\r\n\r\n## End of bootstrapping. The summary file now includes the bootstrapping results for further inspection.\r\n\r\n","sub_path":"Simulation_Integ_W1/ABM_within.py","file_name":"ABM_within.py","file_ext":"py","file_size_in_byte":33379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"403600405","text":"# This code computes lower bounds on adversarial risk for CIFAR10 dataset and Gaussian mixtures based on them.\n# The adversarial model is an L infinity norm perturbation adversary with budget epsilon.\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\nimport torchvision.models as models\n\nimport numpy as np\nfrom lapsolver import solve_dense\n\n\nfrom matplotlib import pyplot as plt\nimport pandas as pd\n\nfrom scipy.stats import norm \nfrom scipy.stats import ncx2\nfrom scipy.spatial.distance import cdist\nfrom scipy.spatial.distance import pdist\n\n\n# Select the training dataset of CIFAR10\nx_train = datasets.CIFAR10(\"../data\", train=True, download=True, transform=transforms.ToTensor())\n\n# Filtering for class labels 3 and 5 (corresponds to cats and dogs)\ntarget_inds_train = [i for i, j in enumerate(x_train.targets) if (j==3)|(j==5)]\nx_train.data = x_train.data[target_inds_train]\nx_train.targets = [x_train.targets[i] for i in target_inds_train]\n\ntrain_loader = DataLoader(x_train, batch_size = 100, shuffle=True)\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ntorch.manual_seed(0)\n\nn = int(len(x_train.data)/2)\ndim = np.size(x_train.data[0])\n\nx0_inds = [i for i, j in enumerate(x_train.targets) if (j==3)]\nx1_inds = [i for i, j in enumerate(x_train.targets) if (j==5)]\n\nx0 = x_train.data[x0_inds].reshape((n,-1)).astype(float)\nx1 = x_train.data[x1_inds].reshape((n,-1)).astype(float)\n\n\ndist0 = pdist(x0, 'chebyshev')\ndist1 = pdist(x1, 'chebyshev')\n\n# Choose a sigma^* value based on the mean distance between pairs of datapoints from the same class\nsig_opt = (np.mean(dist0)+ np.mean(dist1))/4\n\ndists = cdist(x0,x1,'chebyshev')\n\nsig_range = [sig_opt/3, sig_opt, sig_opt*3]\n\n# Select a range of adversarial budgets\nep_range = np.arange(0, 111, 5)\n\nerr_lb_arr = np.zeros(len(ep_range))\nerr_lb_arr_sig = np.zeros((len(sig_range), len(ep_range)))\n\nfor epid in range(len(ep_range)):\n epsilon = ep_range[epid]\n costs = np.ones_like(dists)\n costs[dists<=2*epsilon]=0\n rids, cids = 
solve_dense(costs)\n opt_cost = 0;\n for r,c in zip(rids, cids):\n opt_cost = opt_cost + costs[r,c] \n err_lb = 0.5*(1-opt_cost/n)\n err_lb_arr[epid] = err_lb\n print('Error lower bound for epsilon = ' + str(epsilon) + ' is '+ str(err_lb)+'.')\n\n for sigid in range(len(sig_range)):\n sig = sig_range[sigid]\n opt_cost_sig = 0\n for r,c in zip(rids, cids): \n mean_diff = dists[r,c]\n prob_temp = 2*norm.cdf((mean_diff/2-epsilon)/sig)-1 \n prob_rc = costs[r,c]*prob_temp\n opt_cost_sig = opt_cost_sig + prob_rc \n err_lb_sig = 0.5*(1-opt_cost_sig/n)\n err_lb_arr_sig[sigid,epid] = err_lb_sig\n print('(With sigma) Error lower bound for epsilon = ' + str(epsilon) + ' and sigma = ' + str(sig) + ' is '+ str(err_lb_sig)+'.')\n\n\nplt.figure()\nplt.plot(ep_range, err_lb_arr, 'o-', label=r'$\\sigma$ = 0', color='k')\nplt.plot(ep_range, err_lb_arr_sig[0], 'o-', label=r'$\\sigma = \\sigma^*/3$', color='purple')\nplt.plot(ep_range, err_lb_arr_sig[1], 'o-', label=r'$\\sigma = \\sigma^*$', color='mediumorchid')\nplt.plot(ep_range, err_lb_arr_sig[2], 'o-', label=r'$\\sigma = 3 \\sigma^*$', color='deeppink')\n\nplt.legend(fontsize = 'xx-large')\nplt.xlabel(r'Adversarial budget, $\\epsilon\\times 255$', fontsize = 'xx-large')\nplt.ylabel('Adversarial error lower bound', fontsize = 'xx-large')\nplt.grid()\nplt.show()\nplt.savefig('cifar_linf.png', bbox_inches='tight')\n\n\n\n\n\n\n\n","sub_path":"Linf_bound_CIFAR10.py","file_name":"Linf_bound_CIFAR10.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"369364667","text":"\r\n'''\r\n Read image from D: drive and copy it in E: drive:\r\n'''\r\n\r\n#Modes\r\n#rb - Read binary\r\n#wb - Write binary\r\n\r\nfile1=open(\"D:\\\\birthday.jpg\",\"rb\")\r\nfile2=open(\"E:\\\\birthday.jpg\",\"wb\")\r\n\r\nfile2.write(file1.read())\r\nfile1.close()\r\nfile2.close()\r\n\r\nprint(\"Image copied successfully!!!\")\r\n","sub_path":"29.09.2020 Python/copy_image.py","file_name":"copy_image.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"595547926","text":"import discord\nimport time\nimport cipher\n#client = discord.Client() # when not using prefixes in bot\n\n\n# for jokes\n# ------start-----\n\n\nimport numpy as np\nimport pandas as pd\n\nz = pd.read_csv('jokes.csv')\nk = z.iloc[:, 1:2]\ny = np.random.randint(0, len(np.array(k)))\nvc = \"\"\n# -----end-----\n\n# security------------------\nf = open(\"token.txt\", \"r\")\nt = f.read()\nf.close()\n#---------------------------\nt = cipher.dec(t) # <---- uncomment this code \n\n\n# Commands and Prefixes\n#------------------------------\n\nfrom discord.ext import commands\n\nclient = commands.Bot(command_prefix = '$')\n\n#------------------------------\n\n\n@client.event\nasync def on_ready():\n # await send(\"swagat ni karoge hamara!!\")\n print(\"UP AND RUNNING\")\n\n# Commands\n#------- starts -----------\nstop = False # **\n\n@client.command()\nasync def greet(ctx):\n await ctx.send(\"hello\")\n \n@client.command()\nasync def cmc(ctx):\n await ctx.send(\"cmc \"+ctx.author.mention)\n\n@client.command()\nasync def jokes(ctx):\n y = np.random.randint(0,len(np.array(k)))\n await ctx.send(np.array(k)[y][0])\n\n@client.command()\nasync def spam(ctx, *message):\n global stop\n spam = message[:]\n print(spam) \n size = spam[0]\n tim = spam[1]\n mess = \" \".join(spam[2:])\n print (size,tim,mess)\n for x in range(0,int(size)): \n if stop:\n 
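# The module-level `stop` flag is re-checked on every pass of the loop,\n            # so a running $spam can be cancelled from chat; this branch resets\n            # the flag and reports the halt.\n            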
print(\"done\")\n stop = False\n await ctx.send(\"Ok Boss spamming is on halt!!\")\n break\n else:\n time.sleep(float(tim))\n await ctx.send(mess)\n \n@client.command()\nasync def stop(ctx):\n global stop\n stop = True\n\"\"\"\n@client.command()\nasync def nikal_lavde(ctx):\n if ts:\n await ctx.invoke(client.get_command(\"discon\")) #lmao discord.py server saved my ass\n await ctx.send(\"hasta-la-vista baby!!:hand_splayed:\")\n exit()\n\"\"\"\n#----------------------- VC commands --------------------------\nfrom gtts import gTTS\nimport youtube_dl\nimport os\nts = False\nmoany = []\nfor fi in os.listdir('./NSFW/'):\n moany.append(fi)\nprint(moany)\n\n@client.command()\nasync def ping(ctx):\n ping = client.latency\n ping = round(ping*1000)\n await ctx.send(\"ping is \"+str(ping)+\"ms\")\n\n@client.command()\nasync def n(ctx,*mes):\n await ctx.send(\"https://nhentai.net/g/\"+mes[0])\n\n@client.command()\nasync def randn(ctx):\n n = np.random.randint(000,999)\n await ctx.send(\"https://nhentai.net/g/325\"+str(n))\n\n\n@client.command()\nasync def connect(ctx):\n global pl\n global vc\n global ts\n #pl = discord.FFmpegPCMAudio(executable=\"C:/Users/Sonu/Desktop/SLAM_v1.5.4(1)/ffmpeg.exe\", source=\"sup.mp3\")\n print(ctx.author.voice)\n if ctx.author.voice == None:\n await ctx.send(\"``` Not connected to your AWAZ CHANNEL !! MERI NASS PAT JAOGI MANE VC MEIN JOD!! ```\"+ctx.author.mention+\" !!\")\n else:\n channel = ctx.author.voice.channel\n vc = await channel.connect()\n ts = True\n await ctx.send(\"I am here, I AM HEERREEEEEEEE!! AT \"+str(channel))\n #vc.play(pl)\n print(channel, vc)\n\n@client.command()\nasync def disconnect(ctx):\n global ts\n print(client.voice_clients)\n if len(client.voice_clients)==0:\n await ctx.send(\"I am not connected to any VC, retarded \"+ctx.author.mention+\" !!\")\n for x in client.voice_clients:\n if len(client.voice_clients)!=0:\n await ctx.send(\"**SILENT** hojata hu warna mein hi **VIOLENT** hojaunga\")\n await x.disconnect()\n ts=False\n \n\n@client.command()\nasync def play(ctx): \n global pl\n global vc\n if ts:\n pl = discord.FFmpegPCMAudio(source=\"./meme/JOJO.mp3\")\n vc.play(pl)\n else:\n await ctx.send(\"Please connect me to any VC!! so I can play your reatarded music (-_-*) \")\n \n@client.command()\nasync def tts(ctx,*mes):\n global pl\n global vc\n #print(\"haha\")\n print(mes)\n print(\" \".join(mes))\n speech = gTTS(\" \".join(mes), 'en')\n var = \"ply\"\n #print(\"doneo\")\n speech.save(var+\".mp3\")\n print(\"ok\")\n if ts:\n pl = discord.FFmpegPCMAudio(source=\"ply.mp3\")\n vc.play(pl)\n else:\n await ctx.send(\"I am not connected nig!! FFS... 
\"+ctx.author.mention+\" !!\")\n\n@client.command()\nasync def pause(ctx):\n global vc\n vc.pause()\n\n@client.command()\nasync def ruk(ctx):\n global vc\n vc.stop()\n\n@client.command()\nasync def resume(ctx):\n global vc\n vc.resume()\nplayers = {}\n@client.command()\nasync def yt(ctx, url):\n global vc\n global pl\n song = False\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [\n {'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }\n ],\n 'outtmpl':'./songs/%(title)s.%(ext)s',\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n info_dict = ydl.extract_info(url,download=False)\n #print(\"hello\")\n for file in os.listdir('./songs/'):\n if file.endswith(\".webm\"):\n os.rename(file, \"song.mp3\")\n x = info_dict['title']\n print(x[:-1])\n for file in os.listdir('./songs/'):\n print(file)\n if file.startswith(x[:-1]):\n song = True\n break\n if song:\n pl = discord.FFmpegPCMAudio(source=\"./songs/\"+file)\n vc.play(pl)\n else:\n \n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n info_dict = ydl.extract_info(url,download=True)\n for file in os.listdir('./songs/'):\n if file.startswith(x[:-1]):\n break\n print(file)\n pl = discord.FFmpegPCMAudio( source=\"./songs/\"+file)\n vc.play(pl) \n\n@client.command()\nasync def moan(ctx):\n global vc\n global pl\n yi = np.random.randint(0,len(moany))\n print(\"jaj\")\n sor = \"./NSFW/\"+moany[yi]\n if ts:\n pl = discord.FFmpegPCMAudio( source=sor)\n print(\"lal\")\n vc.play(pl)\n else:\n await ctx.send(\"FFS let me in the VC!! *DIMAG SE PAIDAL*.. \"+ctx.author.mention+\" !!\")\n\n@client.command()\nasync def meme(ctx,*me):\n global vc\n global pl\n if ts:\n pl = discord.FFmpegPCMAudio(source=\"./meme/\"+me[0]+\".mp3\")\n vc.play(pl)\n else:\n await ctx.send(\"You don't deserve these memes.. \"+ctx.author.mention+\" !!\")\n\n#------------------------------ VC end ---------------------------\n\n#------- end --------------\n\n\n\n\"\"\"\n@client.event\nasync def on_message(message):\n global k\n global stop # **\n if message.author == client.user:\n print(\"\\n yeah same!!\" + \" \" + str(stop))\n if message.content.startswith(\"stop\"):\n stop = True\n if message.content.startswith('$heyo'):\n x = message.content.startswith('$heyo')\n print(x)\n await message.channel.send('\\n jhalta hai!!')\n if message.content.startswith('god kon hai'):\n await message.channel.send('senpai aap ho SAM THE GOD!!')\n if message.content.startswith('retard bot'):\n await message.channel.send('go fuck yourself!! 
:rage: :middle_finger: :imp: ')\n    if message.content.startswith('wishes'):\n        await message.channel.send('happy diwali to you all retards!!:middle_finger: :spy: ')\n#-----------------------------------------------------------\n    if message.content.startswith('jokes') or message.content.startswith('Jokes'):\n        y = np.random.randint(0,len(np.array(k)))\n        await message.channel.send(np.array(k)[y][0])\n#-----------------------------------------------------------\n    \n    if message.content.startswith(\"$cmc\"):\n        await message.channel.send(\"cmc \"+message.author.mention)\n\n# spamming # **\n#---------- start ------------\n    if message.content.startswith('$spam'):\n        spam = message.content.lower().split(\" \")\n        print(spam)\n        size = spam[1]\n        tim = spam[2]\n        mess = \" \".join(spam[3:])\n        for x in range(0,int(size)):    \n            if stop:\n                print(\"done\")\n                await message.channel.send(\"Ok Boss spamming is on halt!!\")\n                stop = False\n                break\n            else:\n                time.sleep(int(tim))\n                await message.channel.send(mess)\n# ---------- end -------------\n\n    if message.content.startswith('rukja client'):\n        await message.channel.send(\"hasta-la-vista baby!!:hand_splayed:\")\n        exit(0)\n    #if message.content.startswith(\"$greet\"):\n        #await client.process_commands(message)\n\"\"\"\nclient.run(t)\n","sub_path":"skynet.py","file_name":"skynet.py","file_ext":"py","file_size_in_byte":8463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"30564514","text":"f1 = open('1999.txt')\r\nf2 = open('Result Combo2.txt', 'w')\r\na1 = 0\r\n\r\nfor c in f1:\r\n    a1 = 0\r\n    c1 = c[0:-1]\r\n    f = open('Sort.txt' , 'r')\r\n    for line in f:\r\n        a = line[0:-1]\r\n        #a = a.lower()\r\n        if ('2020' in a) :\r\n            a1 += 1\r\n    print(str(c1) + str(' ') + str(a1))\r\n    #f2.write(str(c1) + str(' ') + str(a1) + '\\n')\r\n    f.close()\r\n\r\n\r\n\r\n\r\n","sub_path":"Collection/Анализ Collection #1/Года/Aa1.py","file_name":"Aa1.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"244268632","text":"from django import template  # pragma: no cover\n\nregister = template.Library()  # pragma: no cover\n\n\n@register.filter(name=\"divide\")\ndef divide(number, divided_by):\n    try:\n        return int(number) / int(divided_by)\n    except (ValueError, ZeroDivisionError, TypeError):\n        return None\n\n\n@register.filter(name=\"percent\")\ndef percentage(number: int) -> str:\n    try:\n        return f\"{int(number*100)} %\"\n    except (ValueError, TypeError):\n        return \"\"\n","sub_path":"src/utils/templatetags/divide.py","file_name":"divide.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"319534525","text":"#Leetcode : pass\n#TC- O(n), SC- O(n) for the stack\n#1)create result array with -1\n#2)push each elem index into stack\n#3)loop over the array twice as it's circular array\n#4)check while stack !=Null and stack_top < curr\n##4.1)store curr elem\n\nclass Solution:\n    def nextGreaterElements(self, nums):\n        #1\n        result = [-1]*len(nums)\n        stack =[]\n        #3\n        for x in range(len(nums)*2):\n            i = x%len(nums)\n            #4\n            while len(stack) != 0 and nums[i] > nums[stack[-1]]:\n                #4.1\n                result[stack.pop()] = nums[i]\n            #2    \n            stack.append(i)\n        return result\n\nobj = 
Solution()\nprint(obj.nextGreaterElements([1,2,1]))\n","sub_path":"nextGreaterElem.py","file_name":"nextGreaterElem.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"273675959","text":"import pytest\n\nfrom dl_translate import utils\nfrom dl_translate._pairs import _PAIRS_MBART50\n\n\ndef test_dict_from_weights():\n    weights = [\"mbart50\", \"mbart-large-50-many-to-many-mmt\"]\n\n    valid_keys = [\"langs\", \"codes\", \"pairs\"]\n\n    for w in weights:\n        assert type(utils._dict_from_weights(w)) is dict\n\n        keys = utils._dict_from_weights(w).keys()\n        for key in valid_keys:\n            assert key in keys\n\n\ndef test_dict_from_weights_exception():\n    weights = [\"mbart50\", \"mbart-large-50-many-to-many-mmt\"]\n\n    valid_keys = [\"langs\", \"codes\", \"pairs\"]\n\n    with pytest.raises(ValueError):\n        utils._dict_from_weights(\"incorrect\")\n\n\ndef test_available_languages():\n    langs = utils.available_languages()\n\n    for lang, _ in _PAIRS_MBART50:\n        assert lang in langs\n\n\ndef test_available_codes():\n    codes = utils.available_codes()\n\n    for _, code in _PAIRS_MBART50:\n        assert code in codes\n","sub_path":"tests/quick/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"584784808","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\" \n@author: Cyun\n@file: utils.py \n@version:\n@time: 2019/07/05\n@function: \n\"\"\"\nimport os\nimport pandas as pd\nfrom django.http import HttpResponse\n\n# Configure the exported Excel file\nfile_path = r'./output.xlsx'  # exported Excel file\nwriter = pd.ExcelWriter(file_path)\n\n\nclass DF:\n    \"\"\"\n    Build a DataFrame and export it as a downloadable file\n    \"\"\"\n\n    def __init__(self, data):\n        self.file = pd.DataFrame(data)\n\n    def read_file(self, fn, buf_size=262144):\n        \"\"\"\n        Stream a large file download with a configurable buffer size\n        :param fn: file name\n        :param buf_size: buffer size\n        :return:\n        \"\"\"\n        f = open(fn, \"rb\")\n        while True:  # read in chunks\n            c = f.read(buf_size)\n            if c:\n                yield c\n            else:\n                break\n        f.close()\n\n\n    def to_excel(self):\n        self.file.to_excel(writer, index=False, encoding='utf-8', sheet_name='Sheet')\n        writer.save()\n        if os.path.exists(\"output.xlsx\"):\n            response = HttpResponse(self.read_file(\"output.xlsx\"), content_type='application/vnd.ms-excel')  # this is the key step\n            response['Content-Disposition'] = 'attachment; filename=output.xlsx'\n            writer.__init__(file_path)  # re-initialize the writer\n            os.remove(file_path)\n            return response\n","sub_path":"pandas/前端页面导出excel/pandas_to_excel.py","file_name":"pandas_to_excel.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"206257463","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom collections import defaultdict, deque, namedtuple\nfrom copy import deepcopy\nfrom functools import lru_cache\nfrom typing import Dict, List, Set, Tuple\n\nCoord = Tuple[int, int]\nFinder = namedtuple('Finder', [ 'x', 'y', 'steps', 'start' ])\n\n\nclass Unit:\n    def __init__(self, unit_type: str, x: int, y: int):\n        self.type = unit_type\n        self.x = x\n        self.y = y\n        self.attack_power = 3\n        self.hp = 200\n\n    def move(self, x: int, y: int):\n        self.x = x\n        self.y = y\n\n\ndef _rearrange(units: List[Unit]) -> List[Unit]:\n    return sorted([ u for u in units if u.hp ], key=lambda u: (u.y, u.x))\n\n\ndef _valid(x: int, y: int, wall: Set[Coord], units: Set[Coord]) -> bool:\n    return x > 0 and y > 0 and (x, y) not in wall and (x, y) not in 
units\n\n\n@lru_cache(maxsize=1024)\ndef _adjacent(x: int, y: int) -> Tuple[Coord]:\n return (x, y - 1), (x - 1, y), (x + 1, y), (x, y + 1)\n\n\ndef _parse_target(target: Set[Unit]) -> Dict[Coord, List[Unit]]:\n target_mapping = defaultdict(list)\n for t in target:\n for x, y in _adjacent(t.x, t.y):\n target_mapping[x, y].append(t)\n return target_mapping\n\n\ndef _next(u: Unit, others: Set[Unit], target_adj: Set[Coord], wall: Set[Coord]) -> Coord:\n F = []\n min_path_found = None\n Q = deque([ Finder(x, y, 1, (x, y)) for x, y in _adjacent(u.x, u.y)]) # finders need to be spawned in reading order\n others = { (o.x, o.y) for o in others }\n visited = { (u.x, u.y) }\n while Q:\n f = Q.popleft()\n if min_path_found is not None and f.steps > min_path_found:\n continue\n elif not _valid(f.x, f.y, wall, others):\n continue\n elif (f.x, f.y) in target_adj:\n min_path_found = f.steps if min_path_found is None else min(min_path_found, f.steps)\n F.append(f)\n elif (f.x, f.y) not in visited:\n visited.add((f.x, f.y))\n Q.extend([ Finder(x, y, f.steps + 1, f.start) for x, y in _adjacent(f.x, f.y) ]) # finders need to be spawned in reading order\n if not F:\n return u.x, u.y # can't move\n return min(F, key=lambda f: (f.steps, f.y, f.x, f.start[1], f.start[0])).start\n\n\ndef _fight(units: List[Unit], wall: Set[Coord], interrupt_on_death: bool=False) -> Tuple[int, List[Unit]]:\n goblins = { u for u in units if u.type == 'G' }\n elves = { u for u in units if u.type == 'E' }\n rounds = 0\n while elves and goblins:\n units = _rearrange(units)\n for n, u in ((n, u) for n, u in enumerate(units, 1) if u.hp):\n target_mapping = _parse_target(goblins if u.type == 'E' else elves)\n if (u.x, u.y) not in target_mapping:\n # not in target range\n others = { other for other in units if (u.x, u.y) != (other.x, other.y) and other.hp }\n u.move(*_next(u, others, target_mapping.keys(), wall))\n if (u.x, u.y) in target_mapping:\n # attack\n target = min(target_mapping[u.x, u.y], key=lambda t: (t.hp, t.y, t.x))\n target.hp = max(0, target.hp - u.attack_power)\n if not target.hp:\n if target.type == 'G':\n goblins = { u for u in units if u.type == 'G' and u.hp }\n else:\n if interrupt_on_death:\n return None, None\n elves = { u for u in units if u.type == 'E' and u.hp }\n if not elves or not goblins:\n break\n if goblins and elves or n == len(units):\n rounds += 1\n return rounds, [ u for u in units if u.hp ]\n\n\ndef part1(units: List[Unit], wall: Set[Coord]) -> int:\n rounds, units = _fight(units, wall)\n return rounds * sum(u.hp for u in units)\n\n\ndef part2(units: List[Unit], wall: Set[Coord]) -> int:\n elf_attack_power = 4\n while True:\n for u in filter(lambda u: u.type == 'E', units):\n u.attack_power = elf_attack_power\n rounds, survivors = _fight(deepcopy(units), wall, True)\n if rounds is not None:\n return rounds * sum(s.hp for s in survivors)\n elf_attack_power += 1\n\n\ndef _parse(filename: str) -> Tuple[List[Unit], Set[Coord]]:\n with open(filename) as f:\n units = []\n wall = set()\n for y, line in enumerate(f.read().splitlines(), 1):\n for x, c in enumerate(line, 1):\n if c in 'EG':\n units.append(Unit(c, x, y))\n elif c == '#':\n wall.add((x, y))\n return units, wall\n\n\nif __name__ == '__main__':\n units, wall = _parse('input.txt')\n print(part1(deepcopy(units), wall)) # 225096\n print(part2(units, wall)) # 35354\n","sub_path":"2018/day_15/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} 
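A quick aside on the day-15 record above: every tie-break uses Advent of Code's "reading order" — top-to-bottom, then left-to-right — which is why each sort key is built as (y, x) even though positions are stored as (x, y) tuples. A minimal sketch of that ordering (my illustration, not part of the dataset):

```python
# Reading-order comparison as used by _rearrange and _next above:
# compare by row (y) first, then by column (x).
points = [(3, 1), (1, 2), (2, 1), (1, 1)]  # (x, y) pairs

ordered = sorted(points, key=lambda p: (p[1], p[0]))
print(ordered)  # [(1, 1), (2, 1), (3, 1), (1, 2)]
```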
+{"seq_id":"154062334","text":"#\n# Example file for working with loops\n#\n\ndef main():\n x = 0\n\n # define a while loop\n# while (x<5):\n# print(x)\n# x = x + 1\n\n # define a for loop (start at first but does not include the second number)\n # for x in range(5, 10):\n # print(x)\n\n # use a for loop over a collection\n # days = [\"Mon\", \"Tue\", \"Wed\", \"Thur\", \"Fri\", \"Sat\", \"Sun\"]\n # for d in days:\n # print(d)\n \n \n # use the break and continue statements\n # for x in range(5, 10):\n # # if (x==7): break\n # if (x%2 == 0): continue # skips the rest of the processing and returns to the next iteration of the for loop\n # print(x)\n\n #using the enumerate() function to get index \n days = [\"Mon\", \"Tue\", \"Wed\", \"Thur\", \"Fri\", \"Sat\", \"Sun\"]\n # enumerate allows for the index of the array to be noted\n for i,d in enumerate(days):\n print(i, d)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Python/LinkedIn/loops_start.py","file_name":"loops_start.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"613283159","text":"# coding: utf-8\nimport re\nfrom tqdm import tqdm\nimport pickle\n\n\ndef extract_mention_and_entity(exp):\n tmp = exp[2:-2]\n tmp2 = tmp[0].upper() + tmp[1:]\n if \"|\" in tmp2:\n entity, mention = tmp2.split(\"|\")\n mention = mention.strip()\n else:\n entity = tmp2[:]\n mention = tmp[:]\n entity = entity.strip()\n entity = entity.replace(\" \", \"_\")\n return entity, mention\n\n\nif __name__ == \"__main__\":\n reg = re.compile(r\"\\[\\[.+?\\]\\]\")\n out = {}\n counter = 0\n with open(\"dump\", errors='ignore') as f1:\n for line in tqdm(f1):\n ents = []\n mentions = []\n for x in re.findall(reg, line):\n try:\n entity, mention = extract_mention_and_entity(x)\n except Exception:\n continue\n key = (entity, mention)\n if key in out:\n continue\n out[key] = counter\n counter += 1\n\n with open(\"me2id.pkl\", \"wb\") as f2:\n pickle.dump(out, f2)\n","sub_path":"mention_and_graph.py","file_name":"mention_and_graph.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"609534420","text":"def insertion_sort(lista):\r\n \r\n # Traverse through 1 to len(arr)\r\n for i in range(1, len(lista)):\r\n \r\n key = lista[i]\r\n \r\n # Move elements of arr[0..i-1], that are\r\n # greater than key, to one position ahead\r\n # of their current position\r\n j = i-1\r\n while j >=0 and key < lista[j] :\r\n lista[j+1] = lista[j]\r\n j -= 1\r\n lista[j+1] = key\r\n return lista\r\n","sub_path":"insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"82175769","text":"# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport argparse\nimport glob\nimport multiprocessing as mp\nimport os\nimport time\nimport cv2\nimport tqdm\n\nfrom detectron2.config import get_cfg\nfrom detectron2.data.detection_utils import read_image\nfrom detectron2.utils.logger import setup_logger\nimport os.path as osp\nfrom predictor import VisualizationDemo\nimport json\nimport numpy as np\nimport pickle\n\n# constants\nWINDOW_NAME = \"COCO detections\"\n\n\ndef setup_cfg(args):\n # load config from file and command-line arguments\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n # Set score_threshold for builtin models\n cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold\n cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold\n cfg.freeze()\n return cfg\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"Detectron2 Demo\")\n parser.add_argument(\n \"--config-file\",\n default=\"configs/quick_schedules/e2e_mask_rcnn_R_50_FPN_inference_acc_test.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\"--webcam\", action=\"store_true\", help=\"Take inputs from webcam.\")\n parser.add_argument(\"--video-input\", help=\"Path to video file.\")\n parser.add_argument(\"--input\", nargs=\"+\", help=\"A list of space separated input images\")\n parser.add_argument(\n \"--output\",\n help=\"A file or directory to save output visualizations. \"\n \"If not given, will show output in an OpenCV window.\",\n )\n\n parser.add_argument(\n \"--confidence-threshold\",\n type=float,\n default=0.5,\n help=\"Minimum score for instance predictions to be shown\",\n )\n parser.add_argument(\n \"--opts\",\n help=\"Modify model config options using the command-line\",\n default=[],\n nargs=argparse.REMAINDER,\n )\n return parser\n\n\nif __name__ == \"__main__\":\n mp.set_start_method(\"spawn\", force=True)\n args = get_parser().parse_args()\n logger = setup_logger()\n logger.info(\"Arguments: \" + str(args))\n cfg = setup_cfg(args)\n demo = VisualizationDemo(cfg)\n\n path = './coco2017/train2017'\n ignore_imgs = json.load(open('./RefSegDatasets/refseg_anno/all_img_name.json', 'r'))\n ignore_vt = json.load(open('./RefSegDatasets/refseg_anno/all_img_name_vt.json', 'r'))\n\n precomp_annos = {}\n for im_id, img_name in enumerate(ignore_imgs):\n img_name = img_name.split('_')[-1]\n info = {}\n img = read_image(osp.join(path, img_name), format=\"BGR\")\n h,w,c = img.shape\n info['width'] = w\n info['height'] = h\n\n predictions, visualized_output = demo.run_on_image(img)\n boxes = predictions['instances'].pred_boxes.tensor.cpu().numpy()\n pred_score = predictions['instances'].scores.cpu().numpy()[:, None]\n pred_cls = predictions['instances'].pred_classes.cpu().numpy()[:, None]\n boxes = np.concatenate((boxes, pred_score, pred_cls), axis=1)\n info['boxes'] = boxes\n info['img_scale'] = predictions['im_scale']\n precomp_annos[img_name] = info\n # cv2.imwrite('test.png', visualized_output.get_image()[:, :, ::-1])\n\n print('refcoco&+', '{}/{}'.format(im_id, len(ignore_imgs)), img_name, 'have {} boxes'.format(boxes.shape[0]), 'done')\n\n with open(osp.join(\"./RefSegDatasets/refseg_anno/ref_precomp_annos_nms0p5_s0p1.pkl\"), 'wb') as dump_f:\n pickle.dump(precomp_annos, dump_f)\n\n # path = './coco2017/train2017'\n # ignore_imgs = json.load(open('./RefSegDatasets/refseg_anno/all_img_name.json', 'r'))\n # precomp_annos = {}\n #\n # num_imgs 
= len(ignore_imgs)\n # for im_id, img_name in enumerate(ignore_imgs):\n # img_name = img_name.split('_')[-1]\n # info = {}\n # img = read_image(osp.join(path, img_name), format=\"BGR\")\n # h, w, c = img.shape\n # info['width'] = w\n # info['height'] = h\n # precomp_annos[img_name] = info\n # print('{}/{}, {}, done'.format(im_id, num_imgs, img_name))\n #\n # with open(osp.join(\"./RefSegDatasets/refseg_anno/all_images_hw.pkl\"), 'wb') as dump_f:\n # pickle.dump(precomp_annos, dump_f)\n\n # if args.input:\n # if len(args.input) == 1:\n # args.input = glob.glob(os.path.expanduser(args.input[0]))\n # for path in tqdm.tqdm(args.input, disable=not args.output):\n # # use PIL, to be consistent with evaluation\n # img = read_image(path, format=\"BGR\")\n # start_time = time.time()\n # predictions, visualized_output = demo.run_on_image(img)\n # logger.info(\n # \"{}: detected {} instances in {:.2f}s\".format(\n # path, len(predictions[\"instances\"]), time.time() - start_time\n # )\n # )\n #\n # if args.output:\n # if os.path.isdir(args.output):\n # assert os.path.isdir(args.output), args.output\n # out_filename = os.path.join(args.output, os.path.basename(path))\n # else:\n # assert len(args.input) == 1, \"Please specify a directory with args.output\"\n # out_filename = args.output\n # visualized_output.save(out_filename)\n # else:\n # cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)\n # cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])\n # if cv2.waitKey(0) == 27:\n # break # esc to quit\n # elif args.webcam:\n # assert args.input is None, \"Cannot have both --input and --webcam!\"\n # cam = cv2.VideoCapture(0)\n # for vis in tqdm.tqdm(demo.run_on_video(cam)):\n # cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)\n # cv2.imshow(WINDOW_NAME, vis)\n # if cv2.waitKey(1) == 27:\n # break # esc to quit\n # cv2.destroyAllWindows()\n # elif args.video_input:\n # video = cv2.VideoCapture(args.video_input)\n # width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n # height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n # frames_per_second = video.get(cv2.CAP_PROP_FPS)\n # num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n # basename = os.path.basename(args.video_input)\n #\n # if args.output:\n # if os.path.isdir(args.output):\n # output_fname = os.path.join(args.output, basename)\n # output_fname = os.path.splitext(output_fname)[0] + \".mkv\"\n # else:\n # output_fname = args.output\n # assert not os.path.isfile(output_fname), output_fname\n # output_file = cv2.VideoWriter(\n # filename=output_fname,\n # # some installation of opencv may not support x264 (due to its license),\n # # you can try other format (e.g. 
MPEG)\n # fourcc=cv2.VideoWriter_fourcc(*\"x264\"),\n # fps=float(frames_per_second),\n # frameSize=(width, height),\n # isColor=True,\n # )\n # assert os.path.isfile(args.video_input)\n # for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):\n # if args.output:\n # output_file.write(vis_frame)\n # else:\n # cv2.namedWindow(basename, cv2.WINDOW_NORMAL)\n # cv2.imshow(basename, vis_frame)\n # if cv2.waitKey(1) == 27:\n # break # esc to quit\n # video.release()\n # if args.output:\n # output_file.release()\n # else:\n # cv2.destroyAllWindows()\n","sub_path":"demo/demo_run_imgs.py","file_name":"demo_run_imgs.py","file_ext":"py","file_size_in_byte":7837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"184159894","text":"import _thread\nimport RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\nGPIO_TRIGGER = 4\nGPIO_ECHO = 17\nGPIO_BUZZER = 21\nGPIO_RED_LED = 20\nGPIO_GREEN_LED = 16\n\nGPIO.setup(GPIO_TRIGGER, GPIO.OUT)\nGPIO.setup(GPIO_ECHO, GPIO.IN)\nGPIO.setup(GPIO_BUZZER, GPIO.OUT)\nGPIO.setup(GPIO_RED_LED, GPIO.OUT)\nGPIO.setup(GPIO_GREEN_LED, GPIO.OUT)\n\nGPIO.output(GPIO_TRIGGER, False)\nGPIO.output(GPIO_BUZZER, False)\nGPIO.output(GPIO_RED_LED, False)\nGPIO.output(GPIO_GREEN_LED, True)\n\nkeepRunning = True\ndistance = 0\n\ndef measureDistance():\n GPIO.output(GPIO_TRIGGER, True)\n time.sleep(0.00001)\n GPIO.output(GPIO_TRIGGER, False)\n start = time.time()\n stop = time.time()\n while GPIO.input(GPIO_ECHO) == 0:\n start = time.time()\n\n while GPIO.input(GPIO_ECHO) == 1:\n stop = time.time()\n\n elapsed = stop - start\n distance = elapsed * 17150\n return distance\n\ndef playSound(threadName, delay):\n keepRunning\n distance\n while keepRunning:\n if distance <= 30:\n GPIO.output(GPIO_BUZZER, True)\n time.sleep(0.01 * distance)\n GPIO.output(GPIO_GREEN_LED, False)\n GPIO.output(GPIO_RED_LED, True)\n \n GPIO.output(GPIO_BUZZER, False)\n time.sleep(0.05 * distance)\n GPIO.output(GPIO_RED_LED, False)\n GPIO.output(GPIO_GREEN_LED, True)\n\n time.sleep(delay)\n\ntry:\n distance = measureDistance()\n _thread.start_new_thread(playSound, (\"BuzzerThread1\", 0.01))\n while True:\n print (distance)\n time.sleep(0.1)\n distance = measureDistance()\nexcept:\n keepRunning = False\n time.sleep(1)\n GPIO.cleanup()\n\n","sub_path":"project3.py","file_name":"project3.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"250609371","text":"#\r\n# @lc app=leetcode id=643 lang=python3\r\n#\r\n# [643] Maximum Average Subarray I\r\n#\r\n# https://leetcode.com/problems/maximum-average-subarray-i/description/\r\n#\r\n# algorithms\r\n# Easy (38.48%)\r\n# Total Accepted: 42.7K\r\n# Total Submissions: 111.1K\r\n# Testcase Example: '[1,12,-5,-6,50,3]\\n4'\r\n#\r\n# \r\n# Given an array consisting of n integers, find the contiguous subarray of\r\n# given length k that has the maximum average value. 
And you need to output the\r\n# maximum average value.\r\n# \r\n# \r\n# Example 1:\r\n# \r\n# Input: [1,12,-5,-6,50,3], k = 4\r\n# Output: 12.75\r\n# Explanation: Maximum average is (12-5-6+50)/4 = 51/4 = 12.75\r\n# \r\n# \r\n# \r\n# Note:\r\n# \r\n# 1 k n \r\n# Elements of the given array will be in the range [-10,000, 10,000].\r\n# \r\n# \r\n#\r\nclass Solution:\r\n def findMaxAverage(self, nums, k):\r\n \"\"\"\r\n :type nums: List[int]\r\n :type k: int\r\n :rtype: float\r\n \"\"\"\r\n # i = 0\r\n # max = -10000.0\r\n # while i < len(nums) and i + k <= len(nums):\r\n # if sum(nums[i:i+k]) > max:\r\n # max = sum(nums[i:i+k])\r\n # print(i)\r\n # print(max)\r\n # i = i + 1\r\n # return max/k\r\n sum = 0.0\r\n maxsum = -100000.0\r\n for i in range(len(nums)):\r\n sum += nums[i]\r\n if i >= k:\r\n sum -= nums[i-k]\r\n if i >= k-1:\r\n maxsum = max(sum/k,maxsum)\r\n return maxsum\r\n \r\nif __name__ == \"__main__\":\r\n print(Solution().findMaxAverage([1,12,-5,-6,50,3],4))\r\n","sub_path":"leetcode/643.maximum-average-subarray-i.py","file_name":"643.maximum-average-subarray-i.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"653926740","text":"from helpers import delay_next_check, log, locate_game_window, get_value_from_rect, click_on_box, escape, click_image, click, click_next\nimport pyautogui\nimport time\nimport settings\nimport datetime\n\nlast_check = datetime.datetime.now()\nif last_check.hour == 0:\n last_check = last_check.replace(minute=0)\nelse:\n last_check = last_check.replace(hour=last_check.hour - 1) # Remove 1 hour to make sure it checks first run\n\n\ndef check_skill_points():\n global last_check\n if not settings.skill_points:\n return\n # delay_msg = \"Checked skillpoints at \" + str(last_check) + \". 
Waiting until 30 minutes has passed\"\n if delay_next_check(15, last_check):\n return\n\n heroes_button_located = pyautogui.locateOnScreen('imgs/herosbutton.png', confidence=0.95)\n if heroes_button_located is not None:\n last_check = datetime.datetime.now()\n log(\"Checking skill points\")\n click(508, 720) # Click heroes button\n time.sleep(2)\n click(140, 260) # Click first hero\n time.sleep(2)\n click(1180, 350) # Click skill book\n time.sleep(2)\n points = get_value_from_rect(settings.game_x + 759, settings.game_y + 198, settings.game_x + 795,\n settings.game_y + 225)\n #log(str(points))\n #log(\"Distributing points\")\n for x in range(0, settings.amount_heroes):\n try:\n #log(\"distributing point \" + str(x))\n if int(points) < 1:\n log(\"No points, breaking\")\n break\n #click(1056, 337) # Skill points 1-3\n #time.sleep(0.2)\n #click(1056, 500)\n #time.sleep(0.2)\n #click(1056, 650)\n time.sleep(0.4)\n skill_add = pyautogui.locateOnScreen('imgs/skillpointadd.png', confidence=0.99)\n if skill_add is None:\n time.sleep(0.4)\n click(493, 400) # Next hero\n log(\"Hero is filled, checking next\")\n else:\n time.sleep(0.4)\n log(\"Adding point\")\n click_on_box(skill_add)\n time.sleep(0.2)\n points = get_value_from_rect(settings.game_x + 759, settings.game_y + 198, settings.game_x + 795,\n settings.game_y + 225)\n log(\"Points available: \" + str(points))\n except ValueError:\n log(\"Tesseract error, waiting until next cycle\")\n escape(1)\n break\n log(\"Done distributing points\")\n escape(2)\n","sub_path":"skillpoints.py","file_name":"skillpoints.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"487248033","text":"import json\n\nfrom db_collection_structures.tweet import Tweet\nimport time\nfrom pathlib import Path\nimport tweepy\nfrom utils.twitter_services.authentication import TwitterConnection\n\n\nclass Streamer(tweepy.StreamListener):\n\n def __init__(self, time_limit, _connection_handle):\n super().__init__()\n self.start_time = time.time()\n self.limit = time_limit\n self.db, self.collection, self.db_url = _connection_handle\n self.tweet_handle = Tweet(db=self.db, collection=self.collection, db_url=self.db_url)\n\n def on_data(self, raw_data):\n if (time.time() - self.start_time) < self.limit:\n print(raw_data)\n tweet = json.loads(raw_data.lower())\n if tweet['truncated']:\n tweet['text'] = tweet.get('extended_tweet').get('full_text')\n self.tweet_handle.document_insert(tweet)\n return True\n else:\n self.tweet_handle.conn.db_disconnection()\n print('time out')\n return False\n\n def on_error(self, status_code):\n retry_count = 0\n if status_code == 420:\n time.sleep(15 * 60)\n retry_count += 1\n if retry_count >= 2:\n self.tweet_handle.conn.db_disconnection()\n return False\n\n\nclass StreamingKeyword:\n def __init__(self, _credential_file_path, keyword_, time_limit, _connection_handle):\n authentication = TwitterConnection(_credential_file_path)\n self.api = authentication.conn\n self.keyword = keyword_\n self.time_limit = time_limit\n self.connection_handle = _connection_handle\n\n def streamer(self):\n stream_listener = Streamer(self.time_limit, self.connection_handle)\n stream = tweepy.Stream(auth=self.api.auth, listener=stream_listener)\n stream.filter(track=self.keyword)\n\n\nif __name__ == '__main__':\n streaming_duration = 10\n pwd = Path(__file__).parent.parent\n credential_file_path = pwd / 'credentials' / 'credentials.json'\n keyword = ['keyword']\n db_alias = 
'tweet'\n collection = 'twitter_extended'\n db_url = 'localhost:27017'\n connection_handle = (db_alias, collection, db_url)\n\n target = StreamingKeyword(credential_file_path, keyword, streaming_duration, connection_handle)\n target.streamer()\n","sub_path":"operations/streaming_keyword.py","file_name":"streaming_keyword.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"56880753","text":"from tensorflow.python.keras.models import load_model\nfrom tensorflow.python.keras import models\nimport matplotlib.pyplot as plt\n\nmodel = load_model('data/cats_and_dogs_small_2.h5')\nprint(model.summary())\n\nimg_path = 'data/cats_and_dogs_small/test/cats/cat.1700.jpg'\n\nfrom tensorflow.python.keras.preprocessing import image\nimport numpy as np\n\nimg = image.load_img(img_path, target_size=(150, 150))\nimg_tensor = image.img_to_array(img)\nprint(img_tensor.shape) # (150,150,3) = (width, height, channel)\nimg_tensor = np.expand_dims(img_tensor, axis=0)\nprint(img_tensor.shape) # (1, 150,150,3) = (batch, width, height, channel) 형태로 만듦\n\n#정규화\nimg_tensor /= 255.\nplt.imshow(img_tensor[0])\n# plt.show()\n\n# 8번째(0~7) 레이어까지가 convolution layer(conv+pool)\nlayer_outputs = [layer.output for layer in model.layers[:8]]\n\nactivation_model = models.Model(inputs=model.input, outputs=layer_outputs)\nprint(activation_model.summary())\n\n# activations = output\nactivations = activation_model.predict(img_tensor)\n# first_layer_activation = activations[0]\n# plt.matshow(first_layer_activation[0, :, :, 19], cmap='viridis')\n# plt.matshow(first_layer_activation[0, :, :, 15], cmap='viridis')\n# plt.matshow(first_layer_activation[0, :, :, 16], cmap='viridis')\n# plt.show()\n\n# 변수 이름이 n_cols라 엄청나게 헷갈렸음\nlayer_names = [layer.name for layer in model.layers[:8]]\nimages_per_row = 16\nfor layer_name, layer_activation in zip(layer_names, activations):\n n_features = layer_activation.shape[-1] # kernel(feature)의 개수\n size = layer_activation.shape[1] #index2도 가능 정사각형 형태의 kernel이기 때문\n n_cols = n_features // images_per_row # 32/16, 64/16, 128/64, ...\n display_grid = np.zeros((n_cols * size, images_per_row * size)) # (row, column)\n print(display_grid.shape)\n\n for col in range(n_cols):\n for row in range(images_per_row):\n channel_image = layer_activation[0, :, :, col * images_per_row + row]\n\n # 이미지를 보다 부드럽게 표현\n channel_image -= channel_image.mean()\n channel_image /= channel_image.std()\n channel_image *= 64\n channel_image += 128\n channel_image = np.clip(channel_image, 0, 255).astype('uint8')\n\n display_grid[col * size : (col + 1) * size,\n row * size : (row + 1) * size] = channel_image\n\n scale = 1. 
/ size\n plt.figure(figsize=(scale * display_grid.shape[1],\n scale * display_grid.shape[0])) #(row, col)\n plt.title(layer_name)\n plt.imshow(display_grid, aspect='auto', cmap='viridis')\nplt.show()","sub_path":"Python/cnn_study_day5/day4/19_ReadConvImageEx.py","file_name":"19_ReadConvImageEx.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"114383234","text":"#!/usr/bin/python\n\nfrom scipy.stats import nbinom\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Simple simulation for cases where diamond at Unbreaking III break\n# before netherite at Unbreaking II\n#\n# The math involves a simple sum of random variables compared against zero\n# but the simulation provides a convenient sanity check.\n\nif __name__ == '__main__':\n\n trials = 1000000\n\n # Diamond at Unbreaking III\n rv1 = nbinom.rvs(1561, 1/4, size=trials) + 1561\n\n # Netherite at Unbreaking II\n rv2 = nbinom.rvs(2031, 1/3, size=trials) + 2031\n\n c = [0, 0]\n for i in range(trials):\n if rv1[i] < rv2[i]:\n c[0] += 1\n else:\n c[1] += 1\n\n print(c[0]/(c[0]+c[1]))\n","sub_path":"sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"617754492","text":"import time\n# from okex import OkEx\nfrom OkcoinFutureAPI import OKCoinFuture\nimport requests\nimport json\nimport re\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nfrom smtplib import SMTP_SSL\nimport numpy as np\n\naccess_key = 'your-access-key'\nsecret_key = 'your-secret-key'\n\nurl = 'https://www.okex.com'\nslippage_fee = 0.0002\ntaker_fee = 0.0003\nmaker_fee = 0.0001\nokcoinFuture = OKCoinFuture(url,access_key,secret_key)\n\n\ndef get_future_quarter_close_price(trade_token):\n \"\"\"\n\n Get the quarter future trade token's close price.\n\n :param trade_token:\n :return:\n \"\"\"\n info_url = 'https://www.okex.com/api/v1/future_ticker.do?symbol=%s&contract_type=this_week' % trade_token\n info_url_content = requests.get(info_url).content\n price_data = json.loads(info_url_content)\n close_price = price_data['ticker']['last']\n print('\\033[0;32;40m\\tThe %s close price is:\\033[0m \\033[0;31;40m\\t%s\\033[0m' % (trade_token,close_price))\n return close_price\n\n\ndef get_account_balance(token):\n \"\"\"\n\n Get your account's balance.\n\n :param token:\n :return:\n \"\"\"\n user_info = okcoinFuture.future_userinfo()\n data = json.loads(user_info)\n holder_balance = data['info'][token]['account_rights']\n print('\\033[0;32;40m\\tYour %s balance is:\\033[0m \\033[0;31;40m\\t%s\\033[0m'%(token,holder_balance))\n return holder_balance\n\n\ndef get_holder_info(trade_token):\n \"\"\"\n\n Get your quarter future holding info.\n lever rate is 10x or 20x.\n contract type is this week, next week or quarter.\n buy/sell amount is your order direction's holding.\n buy/sell price average is your order average price.\n buy/sell available is your order average buy/sell amount.\n\n :param trade_token:\n :return:\n \"\"\"\n user_holder_info = okcoinFuture.future_position(trade_token,'this_week')\n holding_profit_all_data = json.loads(user_holder_info)\n holding_profit_data = holding_profit_all_data['holding']\n holding_force_price = holding_profit_all_data['force_liqu_price']\n for holding_details in holding_profit_data:\n if direction is '1':\n order_avg_price = holding_details['buy_price_avg']\n order_holding_amount = 
holding_details['buy_amount']\n order_available_amount = holding_details['buy_available']\n elif direction is '2':\n order_avg_price = holding_details['sell_price_avg']\n order_holding_amount = holding_details['sell_amount']\n order_available_amount = holding_details['sell_available']\n # print(holding_details)\n print('\\033[0;32;40m\\tYour holding average price is:\\033[0m \\033[0;31;40m\\t{0:*^30}\\033[0m'.format(order_avg_price))\n print('\\033[0;32;40m\\tYour holding amount is:\\033[0m \\033[0;31;40m\\t{0:*^30}\\033[0m'.format(order_holding_amount))\n print('\\033[0;32;40m\\tYour available amount is:\\033[0m \\033[0;31;40m\\t{0:*^30}\\033[0m'.format(order_available_amount))\n print('\\033[0;32;40m\\tYour force liquidation price is:\\033[0m \\033[0;31;40m\\t{0:*^30}\\033[0m'.format(holding_force_price))\n print('')\n return order_avg_price,order_holding_amount,order_available_amount\n\n\ndef calculate_profit(open_order_price,close_price,direction,margin):\n \"\"\"\n\n Calculate your holding profit and profit percent.\n\n :param open_order_price:\n :param close_price:\n :param direction:\n :param margin:\n :return:\n \"\"\"\n if direction is '1':\n profit_meta = (close_price - open_order_price) / open_order_price\n profit = profit_meta * 20 * float(margin)\n profit_percent = profit / float(margin) * 100\n print('\\033[0;32;40m\\tYour profit is:\\033[0m \\033[0;31;40m\\t%.5f\\033[0m' % profit)\n print('\\033[0;32;40m\\tYour profit percent is:\\033[0m \\033[0;31;40m\\t%.3f%%\\033[0m' % profit_percent)\n elif direction is '2':\n profit_meta = (open_order_price - close_price) / close_price\n profit = profit_meta * 20 * float(margin)\n profit_percent = profit / float(margin) * 100\n print('\\033[0;32;40m\\tYour profit is:\\033[0m \\033[0;31;40m\\t%.5f\\033[0m' % profit)\n print('\\033[0;32;40m\\tYour profit percent is:\\033[0m \\033[0;31;40m\\t%.3f%%\\033[0m' % profit_percent)\n return profit,profit_percent\n\n\ndef get_future_position(trade_token):\n \"\"\"\n\n Get all your holding position.\n\n :param trade_token:\n :return:\n \"\"\"\n future_position = okcoinFuture.future_position(trade_token,'this_week')\n print(future_position)\n\n\ndef future_order(trade_token,order_price,order_amount,order_type):\n \"\"\"\n\n Make quarter future order.order type is: 1:开多 2:开空 3:平多 4:平空\n\n :param trade_token:\n :param order_price:\n :param order_amount:\n :param trade_type:\n :return:\n \"\"\"\n order_future = okcoinFuture.future_trade(trade_token,'this_week',order_price,order_amount,order_type,'0','20')\n order_details = json.loads(order_future)\n order_id = order_details['order_id']\n print(order_future)\n print(order_id)\n return order_id\n\n\ndef order_close(trade_token,order_price,order_amount,direction):\n \"\"\"\n\n Close your holding. 
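The Chinese labels that follow mean: 1 = open long, 2 = open short, 3 = close long, 4 = close short. 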
order type is: 1:开多 2:开空 3:平多 4:平空\n\n :param trade_token:\n :param order_price:\n :param order_amount:\n :param direction:\n :return:\n \"\"\"\n order_future = okcoinFuture.future_trade(trade_token, 'this_week', order_price, order_amount, direction, '0', '20')\n order_details = json.loads(order_future)\n if order_details['result'] is True:\n print('Close order is OK.')\n else:\n print('I got an Error!')\n print(order_future)\n print(order_details['result'])\n\n\ndef order_cancel(trade_token,order_id):\n \"\"\"\n\n Cancel the special id order.\n\n :param trade_token:\n :param order_id:\n :return:\n \"\"\"\n cancel_order = okcoinFuture.future_cancel(trade_token,'this_week',order_id)\n print(cancel_order)\n\n\n# def save_to_file(get_time,EOS_balance,ADD_balance,bid_avg_cost,delivery_price,auto_volume):\n# \"\"\"\n#\n# save the param to csv file, include get_time, EOS_balance, ADD_balance, bid_avg_cost, delivery_price and auto_volume.\n#\n# :param get_time:\n# :param EOS_balance:\n# :param ADD_balance:\n# :param bid_avg_cost:\n# :param delivery_price:\n# :param auto_volume:\n# :return:\n# \"\"\"\n# with open('bid_avg_cost.csv','a',) as f:\n# fieldnames = ['Run_Time','EOS_Balance','ADD_Balance','Bid_Avg_Cost','Delivery_Price','Auto_Volume']\n# writer = csv.DictWriter(f,fieldnames=fieldnames)\n# if os.path.getsize('bid_avg_cost.csv') == 0:\n# writer.writeheader()\n# writer.writerow({'Run_Time':get_time,'EOS_Balance':EOS_balance,'ADD_Balance':ADD_balance,\n# 'Bid_Avg_Cost':bid_avg_cost,'Delivery_Price':delivery_price,'Auto_Volume':auto_volume})\n# f.close()\n\n\ndef alarm_send_email(email_content):\n \"\"\"\n\n send the alarm email.\n\n :param email_content:\n :return:\n \"\"\"\n host_server = 'smtp.163.com'\n sender = 'your-email'\n receiver = 'receiver-email'\n sender_passwd = 'your-email-password'\n # email_content = \"Hi,

Your script is stopping...

\"\n email_title = 'Bot Alarming~'\n smtp = SMTP_SSL(host_server)\n smtp.set_debuglevel(1)\n smtp.ehlo(host_server)\n smtp.login(sender,sender_passwd)\n msg = MIMEText(email_content, \"html\", 'utf-8')\n msg['Subject'] = Header(email_title, 'utf-8')\n msg['From'] = sender\n msg['To'] = Header(\"Lei\", 'utf-8')\n smtp.sendmail(sender, receiver, msg.as_string())\n smtp.quit()\n\n\nif __name__ == '__main__':\n token = input('Please input your trade token:\n')\n # order_price = input('Please input your order price:\n')\n # order_amount = input('Please input your order amount:\n')\n # token = 'btc'\n print('Your trade token is BTC......')\n print('')\n direction = input('Please select your order direction just now:\n 1:开多(open long) 2:开空(open short) 3:平多(close long) 4:平空(close short)\n')\n trade_token = token + '_usd'\n margin = input('Please input your margin:\n')\n positive_profit_list = []\n negative_profit_list = []\n positive_price_list = []\n negative_price_list = []\n pos_retrace_profit_list = []\n neg_retrace_profit_list =[]\n pos_retrace_price_list = []\n neg_trace_price_list = []\n pos_avg_price = []\n neg_avg_price = []\n pos_avg_profit = []\n neg_avg_profit = []\n i = 0\n # base_profit_percent = 7\n # print('base_percent is {}'.format(base_profit_percent))\n while True:\n close_price = get_future_quarter_close_price(trade_token)\n open_order_price,all_amount,available_order_amount = get_holder_info(trade_token)\n profit_meta,profit_percent_meta = calculate_profit(open_order_price,close_price,direction,margin)\n profit = round(profit_meta,5)\n profit_percent = round(profit_percent_meta,3)\n # base_profit_percent = 7\n # print('base_percent is {}'.format(base_profit_percent))\n if profit_percent > 7 and len(positive_profit_list) > 0:\n print('+profit and != None')\n print('positive is {}'.format(positive_profit_list))\n pos_list_max_profit = positive_profit_list[0]\n pos_list_max_price = positive_price_list[0]\n pos_retrace_profit = pos_list_max_profit * 0.63\n pos_retrace_price = pos_list_max_price * 0.63\n if profit_percent > pos_list_max_profit:\n positive_profit_list.append(profit_percent)\n positive_profit_list.sort(reverse=True)\n pos_list_max_profit = positive_profit_list[0]\n pos_retrace_profit = pos_list_max_profit * 0.63\n pos_retrace_profit_list.append(pos_retrace_profit)\n positive_price_list.append(close_price)\n positive_price_list.sort(reverse=True)\n pos_list_max_price = positive_price_list[0]\n pos_retrace_price = pos_list_max_price * 0.63\n pos_retrace_price_list.append(pos_retrace_price)\n print('profit_percent > pos_list_max_profit')\n elif pos_retrace_profit <= profit_percent <= positive_profit_list[0]:\n pos_avg_profit.append(profit_percent)\n pos_avg_price.append(close_price)\n print('pos_retrace_profit <= profit_percent <= pos_list_max_profit')\n print('pos_avg_profit list is {}'.format(pos_avg_profit))\n print('pos_avg_price list is {}'.format(pos_avg_price))\n i = i + 1\n if i == 4:\n pos_avg_order_profit = round(np.mean(pos_avg_profit),3)\n pos_avg_order_price = round(np.mean(pos_avg_price), 3)\n order_close(trade_token, pos_avg_order_price, available_order_amount, int(direction) + 2)\n message = 'Your order profit percent is %s%%, order profit is %s and I close the order......' 
% (\n pos_avg_order_profit, profit)\n print(alarm_send_email(message))\n print('pos_retrace_profit <= profit_percent <= pos_list_max_profit')\n print('pos_avg_order_profit is {}'.format(pos_avg_order_profit))\n print('pos_avg_order_price is {}'.format(pos_avg_order_price))\n print('pos_avg_price_list is {}'.format(pos_avg_price))\n print(i)\n # pos_avg_profit.clear()\n # pos_avg_price.clear()\n # i = 0\n break\n # if pos_avg_order_profit != base_profit_percent:\n # base_profit_percent = pos_avg_order_profit\n # elif pos_avg_order_profit == base_profit_percent:\n # pos_avg_order_price = round(np.mean(pos_avg_price),3)\n # order_close(trade_token, pos_avg_order_price, available_order_amount, int(direction)+2)\n # message = 'Your order profit percent is %s%%, order profit is %s and I close the order......' % (pos_avg_order_profit,profit)\n # print(alarm_send_email(message))\n # print('pos_retrace_profit <= profit_percent <= pos_list_max_profit')\n # print('pos_avg_order_profit is {}'.format(pos_avg_order_profit))\n # print('pos_avg_order_price is {}'.format(pos_avg_order_price))\n # print('pos_avg_price_list is {}'.format(pos_avg_price))\n # print(i)\n # pos_avg_profit.clear()\n # pos_avg_price.clear()\n # i = 0\n # break\n elif profit_percent < pos_retrace_profit:\n i = i + 1\n if i == 4:\n pos_order_profit = pos_retrace_profit\n pos_order_price = pos_retrace_price\n print('profit_percent < pos_retrace_profit')\n print('pos_retrace_price is {}'.format(pos_order_price))\n order_close(trade_token, pos_order_price, available_order_amount, int(direction)+2)\n message = 'Your order profit percent is %s%%, order profit is %s and I close the order......' % (pos_order_profit, profit)\n print(alarm_send_email(message))\n break\n print(positive_profit_list)\n print(pos_list_max_profit)\n print(pos_retrace_profit)\n time.sleep(10)\n elif profit_percent > 7 and len(positive_profit_list)<=0:\n print('+profit and is None')\n positive_profit_list.append(profit_percent)\n positive_price_list.append(close_price)\n print(positive_profit_list)\n print('positive_price_list is {}'.format(positive_price_list))\n time.sleep(3)\n elif profit_percent < -5 and len(negative_profit_list) > 0:\n print('-profit and != None')\n print('negative is {}'.format(negative_profit_list))\n neg_list_max_profit = negative_profit_list[0]\n neg_list_max_price = negative_price_list[0]\n neg_retrace_profit = -37\n neg_retrace_price = open_order_price * 0.63\n if profit_percent <= -37:\n i = i + 1\n if i == 4:\n neg_order_profit = neg_retrace_profit\n neg_order_price = neg_retrace_price\n print('profit_percent <= neg_retrace_profit')\n order_close(trade_token, neg_order_price, available_order_amount, int(direction)+2)\n message = 'Your order profit percent is %s%%, order profit is %s and I close the order......' % (neg_order_profit, profit)\n print(alarm_send_email(message))\n break\n elif -37 < profit_percent < -5:\n neg_avg_profit.append(profit_percent)\n neg_avg_price.append(close_price)\n i = i + 1\n if i == 4:\n neg_avg_order_profit = round(np.mean(neg_avg_profit), 3)\n neg_avg_order_price = round(np.mean(neg_avg_price),3)\n print('-37 < profit_percent < -5')\n print(neg_avg_order_profit)\n print('neg_avg_price_list is {}'.format(neg_avg_price))\n print(i)\n order_close(trade_token, neg_avg_order_price, available_order_amount, int(direction)+2)\n message = 'Your order profit percent is %s%%, order profit is %s and I close the order......' 
% (neg_avg_order_profit, profit)\n print(alarm_send_email(message))\n break\n # negative_profit_list.append(profit_percent)\n # negative_profit_list.sort(reverse=True)\n # neg_list_max_list = negative_profit_list[0]\n print(negative_profit_list)\n print(neg_list_max_profit)\n print(neg_avg_price)\n time.sleep(10)\n elif profit_percent < -5 and len(negative_profit_list) <= 0:\n print('-profit and is None')\n negative_profit_list.append(profit_percent)\n negative_price_list.append(close_price)\n print(negative_profit_list)\n time.sleep(3)\n time.sleep(2)\n\n","sub_path":"thisweek.py","file_name":"thisweek.py","file_ext":"py","file_size_in_byte":16542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"373782476","text":"import enum\nimport os\nimport sys\n\nfrom .calculator import Vector2\nfrom .modules import Encoder, Motor, network\n\n\nclass State(enum.Enum):\n NONE = 0\n AUTO_INITIALIZE = 1\n AUTO_LOOP = 2\n TELEOP_INITIALIZE = 3\n TELEOP_LOOP = 4\n\n\ndef run(robot):\n \n try:\n robot = robot()\n finally:\n # Disconnect\n network.disconnect()\n print(sys.exc_info()[0])\n\n # Exit this way because python is dumb\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n\n\nclass NetworkedRobot:\n \"\"\"NetworkedRobot class that robot.py will inherit from\"\"\"\n\n def __init__(self):\n\n # Things we get from the server\n self.world_position = Vector2(0, 0)\n self.world_rotation = 0\n\n self.__state = State.NONE\n\n self.robotInit()\n self.__loop()\n\n def __loop(self):\n \"\"\"Global state loop. Do not touch this\"\"\"\n\n auto_initialized = False\n teleop_initialized = False\n\n while True:\n self.__state = State(network.request_variable(\"state\")['value'])\n\n if (self.__state is State.NONE):\n auto_initialized = False\n teleop_initialized = False\n\n if (self.__state is State.AUTO_INITIALIZE and not auto_initialized):\n self.autonomousInit()\n auto_initialized = True\n\n if (self.__state is State.AUTO_LOOP):\n self.autonomousPeriodic()\n\n if (self.__state is State.TELEOP_INITIALIZE and not teleop_initialized):\n self.teleopInit()\n teleop_initialized = True\n\n if (self.__state is State.TELEOP_LOOP):\n self.teleopPeriodic()\n\n def robotInit(self):\n \"\"\"Called at initialization of the robot class. Override this\"\"\"\n\n def autonomousInit(self):\n \"\"\"Called only at the beginning of autonomous mode. Override this\"\"\"\n\n def autonomousPeriodic(self):\n \"\"\"Called every 20ms in autonomous mode. Override this\"\"\"\n\n def teleopInit(self):\n \"\"\"Called only at the beginning of teleop mode. Override this\"\"\"\n\n def teleopPeriodic(self):\n \"\"\"Called every frame in teleop mode. 
Override this\"\"\"\n","sub_path":"teamcode/resolver/networkedrobot.py","file_name":"networkedrobot.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"355751939","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.style import context\nimport matplotlib as mpl\nfrom matplotlib import cm\nimport pandas as pd\nimport config\nfrom utils import typical_pair_utils\n\nmpl.rcParams['font.size'] = 7\nmpl.rcParams['lines.linewidth'] = 0.5\nspecies_name = 'Bacteroides_vulgatus_57955'\nsave_path = os.path.join(config.analysis_directory,\n \"closely_related\", \"third_pass\", \"{}_all_transfers_processed.pickle\".format(species_name))\nrun_df = pd.read_pickle(save_path)\ncf_cutoff=config.clonal_fraction_cutoff\n_, div_dist = typical_pair_utils.get_joint_plot_x_y(species_name)\n\nall_divergences = run_df['synonymous divergences']\nfig, axes = plt.subplots(3, 1, figsize=(5,4))\nplt.subplots_adjust(hspace=0.5)\n\nhisto = np.loadtxt(os.path.join(config.hmm_data_directory, species_name + '.csv'))\nmids = histo[0, :40]\nwithin_histo = histo[1, :40] / np.sum(histo[1, :])\nbetween_histo = histo[1, 40:] / np.sum(histo[1, :])\naxes[1].bar(mids, within_histo, width=mids[1] - mids[0], label='simulated within-clade', alpha=0.5)\naxes[1].bar(mids, between_histo, width=mids[1] - mids[0], label='simulated between-clade', alpha=0.5)\naxes[1].set_ylabel('Density')\naxes[1].legend()\naxes[1].set_xlabel('Synonymous divergence in transfer')\n\nbins = np.arange(0, all_divergences.max() + mids[1]-mids[0], mids[1]-mids[0])\naxes[2].hist(all_divergences, alpha=0.2, color='tab:grey', bins=bins, label='Total')\naxes[2].hist(run_df[run_df['types']==0]['synonymous divergences'], histtype='step', bins=bins, label='Detected within-clade')\naxes[2].hist(run_df[run_df['types']==1]['synonymous divergences'], histtype='step', bins=bins, label='Detected between-clade')\naxes[2].set_xlim(axes[1].get_xlim())\naxes[2].legend()\naxes[2].set_ylabel('# transfers')\naxes[2].set_xlabel('Synonymous divergence in transfer')\n\naxes[0].hist(div_dist[div_dist<0.03], bins=50)\naxes[0].hist(div_dist[div_dist>0.03], bins=50)\naxes[0].set_xlim(axes[1].get_xlim())\naxes[0].set_xlabel('Pairwise synonymous divergence')\naxes[0].set_ylabel('Pairs')\nplt.tight_layout()\nfig.savefig(os.path.join(config.figure_directory, 'supp_Bv_transfer_divergences.pdf'))","sub_path":"plotting_for_publication/supp_plot_Bv_transfer_divergences.py","file_name":"supp_plot_Bv_transfer_divergences.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"503168775","text":"import time\nimport bme280\nimport smbus2\nfrom luma.core.interface.serial import i2c\nfrom luma.core.render import canvas\nfrom luma.oled.device import ssd1306\nimport os\nimport time\nfrom PIL import ImageFont\nimport subprocess\n\nserial = i2c(port=1, address=0x3C)\ndevice = ssd1306(serial, rotate=0)\nport = 1\naddress = 0x76\nbus = smbus2.SMBus(port)\n\nwhile True:\n calibration_params = bme280.load_calibration_params(bus, address)\n data = bme280.sample(bus, address, calibration_params)\n font_path = os.path.abspath(os.path.join(os.path.dirname(__file__),\n 'fonts', 'Minecraftia.ttf'))\n font2 = ImageFont.truetype(font_path, 8)\n cmd = \"hostname -I | cut -d\\' \\' -f1\"\n IP = subprocess.check_output(cmd, shell = True )\n cmd = \"hostname -s | cut -d\\' \\' -f1\"\n host = 
subprocess.check_output(cmd, shell = True )\n cmd = \"top -bn1 | grep load | awk '{printf \\\"CPU Load: %.2f\\\", $(NF-2)}'\"\n CPU = subprocess.check_output(cmd, shell = True )\n cmd = \"free -m | awk 'NR==2{printf \\\"Mem: %s/%sMB %.2f%%\\\", $3,$2,$3*100/$2 }'\"\n MemUsage = subprocess.check_output(cmd, shell = True )\n cmd = \"df -h | awk '$NF==\\\"/\\\"{printf \\\"Disk: %d/%dGB %s\\\", $3,$2,$5}'\"\n Disk = subprocess.check_output(cmd, shell = True )\n humidity_string = \"%.2f\" % data.humidity\n pressure_string = \"%.2f\" % data.pressure\n temperature_string = \"%.2f\" % data.temperature\n\n with canvas(device) as draw:\n draw.text((0, -2), \"IP: \" + str(IP), fill=\"white\", font=font2)\n draw.text((0, 6), \"Host: \" + str(host), fill=\"white\", font=font2)\n draw.text((0, 14), str(CPU), fill=\"white\", font=font2)\n draw.text((0, 22), str(MemUsage), fill=\"white\", font=font2)\n draw.text((0, 30), str(Disk), fill=\"white\", font=font2)\n draw.text((0, 38), \"Temperature: \" + str(temperature_string) + \" C\", fill=\"white\", font=font2)\n draw.text((0, 46), \"Humidity: \" + str(humidity_string) + \" %\", fill=\"white\", font=font2)\n draw.text((0, 54), \"Pressure: \" + str(pressure_string) + \" hPa\", fill=\"white\", font=font2)\n\ndef main():\n while True:\n stats(device)\n time.sleep(0.5)\n","sub_path":"stats3.py","file_name":"stats3.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"445174714","text":"\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport thinplate as tps\n \ndef test_numpy_fit():\n c = np.array([\n [0., 0, 0.0],\n [1., 0, 0.0],\n [1., 1, 0.0],\n [0, 1, 0.0],\n ])\n\n theta = tps.TPS.fit(c)\n assert_allclose(theta, 0)\n assert_allclose(tps.TPS.z(c, c, theta), c[:, 2])\n\n c = np.array([\n [0., 0, 1.0],\n [1., 0, 1.0],\n [1., 1, 1.0],\n [0, 1, 1.0],\n ])\n\n theta = tps.TPS.fit(c)\n assert_allclose(theta[:-3], 0)\n assert_allclose(theta[-3:], [1, 0, 0])\n assert_allclose(tps.TPS.z(c, c, theta), c[:, 2], atol=1e-3)\n\n # reduced form\n theta = tps.TPS.fit(c, reduced=True)\n assert len(theta) == c.shape[0] + 2\n assert_allclose(theta[:-3], 0)\n assert_allclose(theta[-3:], [1, 0, 0])\n assert_allclose(tps.TPS.z(c, c, theta), c[:, 2], atol=1e-3)\n\n c = np.array([\n [0., 0, -.5],\n [1., 0, 0.5],\n [1., 1, 0.2],\n [0, 1, 0.8],\n ])\n\n theta = tps.TPS.fit(c)\n assert_allclose(tps.TPS.z(c, c, theta), c[:, 2], atol=1e-3)\n \ndef test_numpy_densegrid():\n\n # enlarges a small rectangle to full view\n\n import cv2\n\n img = np.zeros((40, 40), dtype=np.uint8)\n img[10:21, 10:21] = 255\n\n c_dst = np.array([\n [0., 0],\n [1., 0], \n [1, 1],\n [0, 1], \n ])\n\n\n c_src = np.array([\n [10., 10],\n [20., 10], \n [20, 20],\n [10, 20], \n ]) / 40.\n\n theta = tps.tps_theta_from_points(c_src, c_dst)\n theta_r = tps.tps_theta_from_points(c_src, c_dst, reduced=True)\n\n grid = tps.tps_grid(theta, c_dst, (20,20))\n grid_r = tps.tps_grid(theta_r, c_dst, (20,20))\n\n mapx, mapy = tps.tps_grid_to_remap(grid, img.shape)\n warped = cv2.remap(img, mapx, mapy, cv2.INTER_CUBIC)\n\n assert img.min() == 0.\n assert img.max() == 255.\n assert warped.shape == (20,20)\n assert warped.min() == 255.\n assert warped.max() == 255.\n assert np.linalg.norm(grid.reshape(-1,2) - grid_r.reshape(-1,2)) < 
1e-3\n","sub_path":"data/thinplate/tests/test_tps_numpy.py","file_name":"test_tps_numpy.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"373323792","text":"from datetime import datetime\nfrom gmusicapi import Mobileclient\n\nclass PlaylistGenerator:\n\n def __init__(self, device_id, min_track_date, is_dry_run=False, do_logging=True):\n self.min_track_date = min_track_date\n self.is_dry_run = is_dry_run\n self.do_logging = do_logging\n \n self.api = Mobileclient()\n self.api.oauth_login(device_id) \n\n self.playlist_tracks = []\n self.playlist_track_count = 0\n self.playlist_count = 0\n self.total_track_count = 0\n\n \n def generate_playlists(self, playlist_prefix, playlist_suffix, max_tracks_per_playlist, since):\n\n self.library = self.api.get_all_songs(incremental=True,\n include_deleted=None)\n \n for partial_tracklist in self.library:\n for track in partial_tracklist:\n self._add_track(track)\n if(self.playlist_track_count == max_tracks_per_playlist):\n self._finish_current_playlist(playlist_prefix, playlist_suffix)\n \n if (self.playlist_track_count > 0):\n self._log('Have some tracks left over; making smaller playlist\\n')\n self._finish_current_playlist(playlist_prefix, playlist_suffix)\n\n self._log('Done; created '\n + str(self.playlist_count)\n + ' lists with '\n + str(self.total_track_count)\n + ' tracks total\\n')\n\n\n def _finish_current_playlist(self, prefix, suffix):\n name = (prefix + '_' + str(self.total_track_count - self.playlist_track_count) \n + '_' + str(self.total_track_count - 1) + suffix)\n \n self._log('Done with playlist ' + name + '\\n')\n\n if not self.is_dry_run:\n\n self._log('Uploading playlist ' + name + ' with '\n + str(len(self.playlist_tracks)) + ' tracks\\n')\n \n playlist_id = self.api.create_playlist(name)\n results = self.api.add_songs_to_playlist(\n playlist_id, self.playlist_tracks)\n \n self._log('Upload has ' + str(len(results))\n + ' playlist entries; first one is:'\n + results[0])\n\n self.playlist_tracks.clear()\n self.playlist_count += 1\n self.playlist_track_count = 0\n \n\n def _add_track(self, track):\n track_date = track['creationTimestamp']\n track_date = datetime.utcfromtimestamp(int(track_date) / 1000000)\n if track_date > self.min_track_date:\n #self._log('track is new; adding it'); \n self.playlist_tracks.append(track['id'])\n self.total_track_count += 1\n self.playlist_track_count += 1\n\n def _log(self, text):\n if(self.do_logging):\n print(text)\n\n \nif __name__ == '__main__':\n\n from configparser import RawConfigParser\n config = RawConfigParser()\n config.read('settings.ini')\n section = 'google_playlist_generator'\n\n device_id = config.get(section, 'device_id')\n\n last_sync_time = config.get(section, 'last_sync_time')\n update_last_sync_time = config.getboolean(section, 'update_last_sync_time')\n \n if last_sync_time.strip() != '':\n last_sync_time = datetime.strptime(last_sync_time, '%Y-%m-%d %H:%M:%S.%f')\n else:\n last_sync_time = datetime.utcfromtimestamp(0)\n\n \n is_dry_run = config.getboolean(section, 'is_dry_run')\n do_logging = config.getboolean(section, 'do_logging')\n\n pg = PlaylistGenerator(device_id, last_sync_time, is_dry_run, do_logging)\n\n now = datetime.now()\n playlist_prefix = now.strftime(config.get(section, 'playlist_prefix'))\n playlist_suffix = now.strftime(config.get(section, 'playlist_suffix'))\n tracks_per_playlist = config.getint(section, 'tracks_per_playlist')\n\n 
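# generate_playlists (defined above) walks the whole library, keeps tracks whose creationTimestamp is newer than last_sync_time, and uploads them in chunks of tracks_per_playlist\n 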
pg.generate_playlists(playlist_prefix, playlist_suffix, tracks_per_playlist, last_sync_time)\n\n if update_last_sync_time:\n if do_logging:\n print('Updating last_sync_time to %s' % now)\n \n config.set(section, 'last_sync_time', now)\n \n if not is_dry_run:\n with open('settings.ini', 'w') as configfile:\n config.write(configfile) #goodbye comments :(\n","sub_path":"google_playlist_generator.py","file_name":"google_playlist_generator.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"12055063","text":"# Compute positions of planets in equatorial coordinates.\r\n# Basic data and formulae underlying this module are in background_literature folder.\r\n\r\nimport datetime as dt\r\nimport math as mt\r\n\r\nimport numscrypt as ns\r\n\r\nimport utils as ut\r\nimport transforms as tf\r\nimport planet_catalog as pc\r\n\r\n__pragma__ ('opov')\r\n\r\nmPerAu = 149597871e3\r\n\r\nclass Planet:\r\n def __init__ (self, solarSystem, name, basicOrbitElements, extraOrbitElements, period, radius, color):\r\n self.name = name\r\n self.solarSystem = solarSystem\r\n self.basicOrbitElements = basicOrbitElements\r\n self.extraOrbitElements = extraOrbitElements\r\n self.period = period\r\n self.radius = radius\r\n self.color = color\r\n\r\n def setEquatPosition (self):\r\n self.equatPosition = self.computeEquatOrbit (1)[0]\r\n\r\n def setEquatOrbit (self):\r\n self.equatOrbit = self.computeEquatOrbit (180)\r\n\r\n def setEarthViewPosition (self):\r\n rotatedPosition = self.solarSystem.planetarium.rotZyxMat @ (self.equatPosition - self.solarSystem.earth.equatPosition)\r\n self.earthViewPosition = tf.getStereographicProjection (rotatedPosition, self.solarSystem.getViewDistance ())\r\n\r\n def setFarViewOrbit (self):\r\n\r\n self.farViewOrbit = [tf.getProjection (equatPosition - ns.array ((30, 30, 10)), self.solarSystem.getViewDistance ()) for equatPosition in self.equatOrbit]\r\n\r\n def computeEquatOrbit (self, orbitSteps):\r\n a_0 = self.basicOrbitElements [0][0]\r\n a_der = self.basicOrbitElements [1][0]\r\n\r\n e_0 = self.basicOrbitElements [0][1]\r\n e_der = self.basicOrbitElements [1][1]\r\n\r\n I_0 = self.basicOrbitElements [0][2]\r\n I_der = self.basicOrbitElements [1][2]\r\n\r\n L_0 = self.basicOrbitElements [0][3]\r\n L_der = self.basicOrbitElements [1][3]\r\n\r\n om_bar_0 = self.basicOrbitElements [0][4]\r\n om_bar_der = self.basicOrbitElements [1][4]\r\n\r\n Om_0 = self.basicOrbitElements [0][5]\r\n Om_der = self.basicOrbitElements [1][5]\r\n \r\n # t_0 = ut.julianDayNr (dt.datetime (*self.solarSystem.getYmdHms ())) - ut.julianDayNr (dt.datetime (2000, 1, 1, 0, 0, 0))\r\n t_0 = ut.julianDayNr (dt.datetime.now ()) - ut.julianDayNr (dt.datetime (2000, 1, 1, 0, 0, 0))\r\n \r\n equatOrbit = []\r\n\r\n for i in range (orbitSteps):\r\n t = t_0 + i * self.period / orbitSteps\r\n \r\n daysPerCentury = 36525\r\n T = t / daysPerCentury\r\n \r\n a = a_0 + a_der * T\r\n e = e_0 + e_der * T\r\n I = I_0 + I_der * T\r\n L = L_0 + L_der * T\r\n om_bar = om_bar_0 + om_bar_der * T\r\n Om = Om_0 + Om_der * T\r\n\r\n b = self.extraOrbitElements [0]\r\n c = self.extraOrbitElements [1]\r\n s = self.extraOrbitElements [2]\r\n f = self.extraOrbitElements [3]\r\n \r\n om = om_bar - Om\r\n M = L - om_bar + b * T * T + c * mt.cos (ut.radFromDeg (f * T)) + s * mt.sin (ut.radFromDeg (f * T))\r\n \r\n M = M % 360\r\n\r\n if M > 180:\r\n M = M - 360\r\n \r\n e_star = ut.degFromRad (e)\r\n E = M + e_star * mt.sin (ut.radFromDeg (M))\r\n\r\n tol = 1e-6\r\n 
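# Newton-Raphson iteration for Kepler's equation, M = E - e_star * sin E (working in degrees, hence e_star); del_E is the Newton step\r\n 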
del_E = 1e10\r\n\r\n while abs (del_E) > tol:\r\n del_M = M - (E - e_star * mt.sin (ut.radFromDeg (E)))\r\n del_E = del_M / (1 - e * mt.cos (ut.radFromDeg (E)))\r\n E = E + del_E\r\n\r\n xAccent = a * (mt.cos (ut.radFromDeg (E)) - e)\r\n yAccent = a * mt.sqrt (1 - e * e) * mt.sin (ut.radFromDeg (E))\r\n zAccent = 0\r\n\r\n equatOrbit.append (ns.array (ut.equatFromEclipt (\r\n (mt.cos (ut.radFromDeg (om)) * mt.cos (ut.radFromDeg (Om)) - mt.sin (ut.radFromDeg (om)) * mt.sin (ut.radFromDeg (Om)) * mt.cos (ut.radFromDeg (I))) * xAccent +\r\n (-mt.sin (ut.radFromDeg (om)) * mt.cos (ut.radFromDeg (Om)) - mt.cos (ut.radFromDeg (om)) * mt.sin (ut.radFromDeg (Om)) * mt.cos (ut.radFromDeg (I))) * yAccent,\r\n \r\n (mt.cos (ut.radFromDeg (om)) * mt.sin (ut.radFromDeg (Om)) + mt.sin (ut.radFromDeg (om)) * mt.cos (ut.radFromDeg (Om)) * mt.cos (ut.radFromDeg (I))) * xAccent +\r\n (-mt.sin (ut.radFromDeg (om)) * mt.sin (ut.radFromDeg (Om)) + mt.cos (ut.radFromDeg (om)) * mt.cos (ut.radFromDeg (Om)) * mt.cos (ut.radFromDeg (I))) * yAccent,\r\n \r\n mt.sin (ut.radFromDeg (om)) * mt.sin (ut.radFromDeg (I)) * xAccent +\r\n mt.cos (ut.radFromDeg (om)) * mt.sin (ut.radFromDeg (I)) * yAccent\r\n )))\r\n\r\n return equatOrbit\r\n\r\nclass SolarSystem:\r\n def __init__ (self, planetarium, getYmdHms, getViewDistance):\r\n self.planetarium = planetarium\r\n self.getYmdHms = getYmdHms\r\n self.getViewDistance = getViewDistance\r\n\r\n self.planets = [Planet (self, *args) for args in pc.planetCatalog]\r\n\r\n self.earth = self.planets [2]\r\n\r\n def setEquatPositions (self):\r\n for planet in self.planets:\r\n planet.setEquatPosition ()\r\n\r\n def setEquatOrbits (self):\r\n for planet in self.planets:\r\n planet.setEquatOrbit ()\r\n\r\n def setEarthViewPositions (self):\r\n for planet in self.planets:\r\n planet.setEarthViewPosition ()\r\n\r\n def setFarViewOrbits (self):\r\n for planet in self.planets:\r\n planet.setFarViewOrbit ()\r\n\r\n def printPositions (self):\r\n for planet in self.planets:\r\n print (planet.name, planet.equatPosition, planet.earthViewPosition)\r\n","sub_path":"planetarium/engine/solar_system.py","file_name":"solar_system.py","file_ext":"py","file_size_in_byte":5668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}
+{"seq_id":"81695675","text":"from codecs import open\nfrom os import name as os_name\nfrom os import path\n\nfrom setuptools import setup\n\nVERSION = \"0.3.7\" # using semantic versioning\n\nhere = path.abspath(path.dirname(__file__))\n\n\ndef install_requires():\n if os_name == \"nt\":\n return [\"win10toast\"]\n return []\n\n\ndef readme():\n with open(path.join(here, \"README.rst\"), encoding=\"utf-8\") as f:\n return f.read()\n\n\nwith open(path.join(here, \"inappropriate_notifications\", \"version.py\"), \"w\") as f:\n f.write(f\"VERSION = '{VERSION}'\")\n\nsetup(\n name=\"inappropriate-notifications\",\n version=VERSION,\n description=\"Display inappropriate notifications at random intervals\",\n long_description=readme(),\n long_description_content_type=\"text/x-rst\",\n license=\"MIT\",\n entry_points={\n \"console_scripts\": [\n \"inappropriate-notifications=inappropriate_notifications.command_line:main\"\n ]\n },\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Win32 (MS Windows)\",\n \"Environment :: X11 Applications\",\n \"License :: OSI Approved :: MIT License\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: End Users/Desktop\",\n \"Natural Language :: 
English\",\n \"Operating System :: Microsoft :: Windows :: Windows 10\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Artistic Software\",\n ],\n keywords=\"notifications notify inappropriate present\",\n url=\"https://github.com/riley-martine/inappropriate-notifications\",\n author=\"Riley Martine\", # Can we add multiple authors?\n author_email=\"riley.martine.0@gmail.com\",\n packages=[\"inappropriate_notifications\"],\n install_requires=install_requires(),\n python_requires=\"~=3.6\",\n include_package_data=True,\n zip_safe=False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"486337022","text":"import os\nimport stat\nimport json\nfrom uuid import uuid4\nfrom subprocess import Popen, PIPE\n\nclass PowerConsul_Action(object):\n \"\"\"\n Class object representing a trigger action.\n \"\"\"\n def __init__(self, actionData, state):\n self._data = actionData\n self._command = ['/bin/echo', 'noop']\n self._type = None\n self._state = state\n\n # Temporary script\n self._script = None\n\n # Bootstrap the action object\n self._bootstrap()\n\n def _subvars(self, cmdStr):\n \"\"\"\n Look for any custom substitution variables.\n \"\"\"\n for k,v in POWERCONSUL.CONFIG.get('local', 'subVars', default={}).__dict__.iteritems():\n cmdStr = cmdStr.replace('@{0}'.format(k), v)\n return cmdStr\n\n def _bootstrap(self):\n \"\"\"\n Bootstrap the action object.\n \"\"\"\n if not self._data:\n self._type = 'default'\n return None\n\n # Trigger is a script\n if self._data.startswith('#!/bin/bash'):\n self._type = 'script'\n\n # Define a temporary script\n self._script = '/tmp/trigger_{0}.sh'.format(str(uuid4()))\n\n # Dump the action script\n with open(self._script, 'w') as f:\n for line in self._data.split('\\n'):\n f.write(self._subvars(line))\n f.write('\\n')\n os.chmod(self._script, os.stat(self._script).st_mode | stat.S_IEXEC)\n\n # Define the command\n self._command = ['/bin/bash', self._script]\n\n # Assume direct shell command\n else:\n self._type = 'command'\n self._command = self._subvars(self._data).split(' ')\n\n def _cleanup(self):\n \"\"\"\n Post action cleanup.\n \"\"\"\n if os.path.isfile(self._script):\n os.remove(self._script)\n\n def run(self):\n \"\"\"\n Run the state action.\n \"\"\"\n\n # Is this triggered configured for noop?\n if POWERCONSUL.service in POWERCONSUL.CONFIG.get('local', 'noopTriggers', default=[]):\n POWERCONSUL.LOG.info('Service trigger(s) configured as noop. 
Skipping...')\n return True\n\n try:\n proc = Popen(self._command, stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n\n # Command failed\n if proc.returncode != 0:\n POWERCONSUL.LOG.error('type={0}, state={1}, error={2}'.format(self._type, self._state, str(err).rstrip()), method='action.run')\n\n # Command success\n else:\n POWERCONSUL.LOG.info('type={0}, state={1}, output={2}'.format(self._type, self._state, str(out).rstrip()), method='action.run')\n\n # Post action cleanup\n self._cleanup()\n\n # Failed to run action\n except Exception as e:\n POWERCONSUL.LOG.exception('state={0}, error={1}'.format(state, str(e)), method='action.run', die=True)\n\n @classmethod\n def checkNodes(cls):\n \"\"\"\n Check if any primary nodes are passing.\n \"\"\"\n if not POWERCONSUL.CLUSTER.nodes.enabled:\n return None\n\n # No active nodes passing\n if not POWERCONSUL.CLUSTER.activePassing(nodes=POWERCONSUL.CLUSTER.nodes.active):\n POWERCONSUL.LOG.info('No active/healthy nodes, set role: primary', method='action.checkNodes')\n POWERCONSUL.CLUSTER.role = POWERCONSUL.CLUSTER.roles.primary\n\n @classmethod\n def checkDatacenters(cls):\n \"\"\"\n Check if any primary datacenters are passing.\n \"\"\"\n if not POWERCONSUL.CLUSTER.datacenters.enabled:\n return None\n\n # No active datacenters passing\n if not POWERCONSUL.CLUSTER.activePassing(datacenters=[POWERCONSUL.CLUSTER.datacenters.active]):\n POWERCONSUL.LOG.info('No active/healthy datacenters, set role: primary', method='action.checkDatacenters')\n POWERCONSUL.CLUSTER.role = POWERCONSUL.CLUSTER.roles.primary\n\n @classmethod\n def parse(cls, state):\n \"\"\"\n Parse an action stored in the KV database.\n \"\"\"\n try:\n\n import powerconsul.common.logger as logger\n\n # Parse service JSON\n serviceJSON = json.loads(POWERCONSUL.ARGS.get('service',\n required='Must supply a service JSON object: powerconsul trigger -s '\n ))\n\n # Set Consul service name\n POWERCONSUL.service = serviceJSON['ServiceName']\n\n # Setup the logger\n POWERCONSUL.LOG = logger.create('trigger', service=POWERCONSUL.service, log_file='/var/log/powerconsul/trigger/{0}.{1}.log'.format(POWERCONSUL.service, state))\n POWERCONSUL.LOG.info('=' * 20)\n\n # Bootstrap cluster state\n POWERCONSUL.CLUSTER.bootstrap()\n\n # If secondary, make sure primaries are passing\n if POWERCONSUL.CLUSTER.role == POWERCONSUL.CLUSTER.roles.secondary:\n cls.checkDatacenters()\n cls.checkNodes()\n\n # Return the action object\n return cls(POWERCONSUL.getKV('triggers/{0}/{1}/{2}'.format(\n POWERCONSUL.service, POWERCONSUL.CLUSTER.role, state\n )), state)\n\n # Failed to parse service action/object\n except Exception as e:\n POWERCONSUL.LOG.exception('state={0}, error={1}'.format(state, str(e)), method='action.parse', die=True)\n","sub_path":"powerconsul/common/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"500298233","text":"import numpy as np\nfrom time import time\nfrom scipy.optimize import linprog\nfrom scipy.linalg import LinAlgError\n\nfrom ecmtool.helpers import redund\nfrom ecmtool._bglu_dense import BGLU\n\ndef fake_ecm(reaction, metabolite_ids, tol=1e-12):\n s = \"\"\n for i, c in enumerate(np.asarray(reaction, dtype='int')):\n if abs(reaction[i]) > tol:\n if s == \"\":\n s = metabolite_ids[i].replace(\"_in\", \"\").replace(\"_out\", \"\")\n elif s != metabolite_ids[i].replace(\"_in\", \"\").replace(\"_out\", \"\"):\n return False\n return True\n\n\ndef 
print_ecms_direct(R, metabolite_ids):\n obj_id = -1\n if \"objective\" in metabolite_ids:\n obj_id = metabolite_ids.index(\"objective\")\n elif \"objective_out\" in metabolite_ids:\n obj_id = metabolite_ids.index(\"objective_out\")\n\n print(\"\\n--%d ECMs found by intersecting directly--\\n\" % R.shape[1])\n # for i in range(R.shape[1]):\n # print(\"ECM #%d:\" % i)\n # div = 1\n # if obj_id != -1 and R[obj_id][i] != 0:\n # div = R[obj_id][i]\n # for j in range(R.shape[0]):\n # if R[j][i] != 0:\n # print(\"%s: %f\" % (metabolite_ids[j].replace(\"_in\", \"\").replace(\"_out\", \"\"), float(R[j][i]) / div))\n # print(\"\")\n\n\ndef get_more_basis_columns(A, basis):\n \"\"\"\n Called when the auxiliary problem terminates with artificial columns in\n the basis, which must be removed and replaced with non-artificial\n columns. Finds additional columns that do not make the matrix singular.\n \"\"\"\n m, n = A.shape\n\n # if (len(basis) > 0 and np.linalg.matrix_rank(A[:,basis]) < len(basis)):\n # raise Exception(\"Basis has dependent columns\")\n\n rank = np.linalg.matrix_rank(A[:, basis])\n new_basis = basis.copy()\n for i in range(n):\n if i in new_basis:\n continue\n prev_rank = rank\n prev_basis = new_basis\n new_basis = np.append(new_basis, i)\n rank = np.linalg.matrix_rank(A[:, new_basis])\n\n if rank == prev_rank: # column added did not increase rank\n new_basis = prev_basis\n if rank == m:\n break\n\n return new_basis\n\n\ndef kkt_check(c, A, x, basis, tol=1e-8, threshold=1e-3, max_iter=1000, verbose=True):\n \"\"\"\n Determine whether KKT conditions hold for x0.\n Take size 0 steps if available.\n \"\"\"\n ab = np.arange(A.shape[0])\n a = np.arange(A.shape[1])\n\n maxupdate = 10\n B = BGLU(A, basis, maxupdate, False)\n for iteration in range(max_iter):\n bl = np.zeros(len(a), dtype=bool)\n bl[basis] = 1\n xb = x[basis]\n\n try:\n l = B.solve(c[basis], transposed=True) # similar to v = linalg.solve(B.T, c[basis])\n except LinAlgError:\n return True, 1\n sn = c - l.dot(A) # reduced cost\n sn = sn[~bl]\n\n if np.all(sn >= -tol): # in this case x is an optimal solution\n if verbose:\n print(\"Did %d steps in kkt_check, found True - smallest sn: %.8f\" % (iteration - 1, min(sn)))\n return True, 0\n\n entering = a[~bl][np.argmin(sn)]\n u = B.solve(A[:, entering])\n\n i = u > tol # if none of the u are positive, unbounded\n if not np.any(i):\n print(\"Warning: unbounded problem in KKT_check\")\n if verbose:\n print(\"Did %d steps in kkt_check2\" % (iteration - 1))\n return True, 1\n\n th = xb[i] / u[i]\n l = np.argmin(th) # implicitly selects smallest subscript\n step_size = th[l] # step size\n\n # Do pivot\n x[basis] = x[basis] - step_size * u\n x[entering] = step_size\n x[abs(x) < 10e-20] = 0\n B.update(ab[i][l], entering) # modify basis\n basis = B.b\n\n if np.dot(c, x) < -threshold: # found a better solution, so not adjacent\n if verbose:\n print(\"Did %d steps in kkt_check, found False - c*x %.8f\" % (iteration - 1, np.dot(c, x)))\n return False, 0\n\n print(\"Cycling?\")\n return True, 1\n\n\ndef get_nonsingular_pair(A, basis, entering, leaving, basis_hashes):\n for enter in entering:\n for leave in leaving:\n original = basis[leave]\n basis[leave] = enter\n if np.linalg.matrix_rank(A[:, basis]) >= min(A[:, basis].shape):\n if hash(np.sort(basis).tostring()) in basis_hashes:\n basis[leave] = original\n continue\n return basis\n basis[leave] = original\n print(\"Did not find non-singular entering+leaving index...\")\n basis[leaving[0]] = entering[0]\n return basis\n\n\ndef 
independent_rows(A):\n m, n = A.shape\n basis = np.asarray([], dtype='int')\n A_float = np.asarray(A, dtype='float')\n rank = np.linalg.matrix_rank(A_float)\n original_rank = rank\n\n if rank == m:\n return A\n\n rank = 0\n for i in range(m):\n prev_rank = rank\n prev_basis = basis\n basis = np.append(basis, i)\n rank = np.linalg.matrix_rank(A_float[basis])\n\n if rank == prev_rank: # row added did not increase rank\n basis = prev_basis\n if rank == original_rank:\n break\n\n return A[basis]\n\n\ndef eliminate_metabolite(R, met, network, calculate_adjacency=True, tol=1e-12, perturbed=False, verbose=True):\n # determine +/0/-\n plus = []\n zero = []\n minus = []\n for reaction in range(R.shape[1]):\n result = R[met, reaction]\n if abs(result) <= tol:\n zero.append(reaction)\n elif result > tol:\n plus.append(reaction)\n elif result < -tol:\n minus.append(reaction)\n else:\n zero.append(reaction)\n if verbose:\n print(\"\\tNumber of +: %d\" % len(plus))\n print(\"\\tNumber of -: %d\" % len(minus))\n print(\"\\tNumber of LP to do: %d\" % (len(plus) * len(minus)))\n\n # start next matrix with zero rows\n next_matrix = []\n for z in zero:\n col = R[:, z]\n next_matrix.append(col)\n\n if calculate_adjacency:\n adj = geometric_ray_adjacency(R, plus=plus, minus=minus, perturbed=perturbed, verbose=verbose,\n remove_cycles=True)\n\n # combine + and - if adjacent\n nr_adjacent = 0\n for p in plus:\n for m in minus:\n if not calculate_adjacency or adj[p, m] == 1:\n nr_adjacent += 1\n rp = R[met, p]\n rm = R[met, m]\n new_row = rp * R[:, m] - rm * R[:, p]\n if sum(abs(new_row)) > tol:\n next_matrix.append(new_row)\n\n if verbose:\n if len(plus) * len(minus) > 0:\n print(\"Of %d candidates, %d were adjacent (%f percent)\" % (\n len(plus) * len(minus), nr_adjacent, 100 * nr_adjacent / (len(plus) * len(minus))))\n else:\n print(\"Of %d candidates, %d were adjacent (0 percent)\" % (len(plus) * len(minus), nr_adjacent))\n\n next_matrix = np.asarray(next_matrix)\n\n\n\n # redund in case we have too many rows\n rows_before = next_matrix.shape[0]\n\n if verbose:\n print(\"\\tDimensions before redund: %d %d\" % (next_matrix.shape[0], next_matrix.shape[1]))\n start = time()\n # next_matrix = redund(next_matrix)\n end = time()\n rows_removed_redund = rows_before - next_matrix.shape[0]\n if verbose:\n print(\"\\tDimensions after redund: %d %d\" % (next_matrix.shape[0], next_matrix.shape[1]))\n print(\"\\t\\tRows removed by redund: %d\" % (rows_before - next_matrix.shape[0]))\n print(\"\\tRedund took %f seconds\" % (end - start))\n # if rows_before - next_matrix.shape[0] != 0:\n # input(\"Waiting...\")\n\n next_matrix = np.transpose(next_matrix)\n\n # delete all-zero row\n next_matrix = np.delete(next_matrix, met, 0)\n network.drop_metabolites([met])\n print(\"\\tDimensions after deleting row: %d %d\" % (next_matrix.shape[0], next_matrix.shape[1]))\n\n return next_matrix, rows_removed_redund\n\n\ndef get_remove_metabolite(R, network, reaction, verbose=True):\n column = R[:, reaction]\n for i in range(len(column)):\n if not network.metabolites[i].is_external:\n if column[i] != 0:\n return i\n print(\"\\tWarning: reaction to augment has only external metabolites\")\n return 0\n\n\ndef remove_cycles(R, network, tol=1e-12, verbose=True):\n deleted = []\n for k in range(2):\n number_rays = independent_rows(normalize_columns(np.array(R, dtype='float'))).shape[1]\n i = 0 + 2 * k\n j = 1 + 2 * k\n if j > R.shape[1] - 1:\n return R, deleted\n A_ub, b_ub, A_eq, b_eq, c, x0 = 
setup_LP(independent_rows(normalize_columns(np.array(R, dtype='float'))), i, j)\n\n if sum(abs(b_eq)) < tol:\n augment_reaction = i;\n met = get_remove_metabolite(R, network, augment_reaction)\n if verbose:\n print(\"Found an unbounded LP, augmenting reaction %d through metabolite %d\" % (augment_reaction, met))\n R, _ = eliminate_metabolite(R, met, network, calculate_adjacency=False)\n\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, method='revised simplex', options={'tol': 1e-12},\n x0=x0)\n if res.status == 4:\n print(\"Numerical difficulties with revised simplex, trying interior point method instead\")\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, method='interior-point', options={'tol': 1e-12})\n\n # if the objective is unbounded, there is a cycle that sums to zero\n while res.status == 3: # status 3 is unbounded\n A_ub2 = np.concatenate((A_ub, np.identity(number_rays)))\n b_ub2 = np.concatenate((b_ub, [100] * number_rays))\n\n res2 = linprog(c, A_ub2, b_ub2, A_eq, b_eq, method='revised simplex', options={'tol': 1e-12}, x0=x0)\n if res2.status == 4:\n print(\"Numerical difficulties with revised simplex, trying interior point method instead\")\n res2 = linprog(c, A_ub2, b_ub2, A_eq, b_eq, method='interior-point', options={'tol': 1e-12})\n\n if abs(res2.fun) < tol: # res is 'unbounded' but res2 has optimum 0\n break\n\n augment_reaction = [i for i, val in enumerate(res2.x) if val > 90][0]\n met = get_remove_metabolite(R, network, augment_reaction)\n deleted.append(met)\n if verbose:\n print(\"Found an unbounded LP, augmenting reaction %d through metabolite %d (%s)\" % (\n augment_reaction, met, network.metabolites[met].id))\n\n R, _ = eliminate_metabolite(R, met, network, calculate_adjacency=False)\n number_rays = independent_rows(normalize_columns(np.array(R, dtype='float'))).shape[1]\n i = 0 + 2 * k\n j = 1 + 2 * k\n A_ub, b_ub, A_eq, b_eq, c, x0 = setup_LP(independent_rows(normalize_columns(np.array(R, dtype='float'))), i,\n j)\n\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, method='revised simplex', options={'tol': 1e-12}, x0=x0)\n if res.status == 4:\n print(\"Numerical difficulties with revised simplex, trying interior point method instead\")\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, method='interior-point', options={'tol': 1e-12})\n\n return R, deleted\n\n\ndef normalize_columns(R):\n result = R.copy()\n for i in range(result.shape[1]):\n result[:, i] /= np.linalg.norm(np.array(R[:, i], dtype='float'))\n return result\n\n\ndef smallest_positive(arr):\n a = np.where(np.isfinite(arr), arr, -1)\n return min(np.where(a < 0, max(a) * 2, a)), np.argmin(np.where(a < 0, max(a) * 2, a))\n\n\ndef generate_BFS(R, i, j, eps):\n ray1 = np.array(np.concatenate((R[:, i], -R[:, i])), dtype='float')\n ray2 = np.array(np.concatenate((R[:, j], -R[:, j])), dtype='float')\n with np.errstate(divide='ignore', invalid='ignore'):\n alpha, k = smallest_positive(eps / ray1)\n beta = ray2[k] / ray1[k]\n arr = (eps - alpha * ray1) / (ray2 - beta * ray1)\n arr[k] = -1 # ignore place k, because it should always be divide by 0\n delta2, _ = smallest_positive(arr)\n delta1 = -beta * delta2\n sbar = eps - (alpha + delta1) * ray1 - delta2 * ray2\n l = np.zeros(R.shape[1])\n l[i] = 0.5 + alpha + delta1\n l[j] = 0.5 + delta2\n\n res = np.concatenate((l, sbar))\n # round to 0 when a rounding error made it non-zero\n res = np.where(abs(res) < 1e-20, 0, res)\n\n if len(res[res != 0]) != R.shape[0] * 2:\n print(\"problem in generate_BFS\")\n\n return res\n\n\ndef setup_LP_perturbed(R, i, j, epsilon):\n m, n = R.shape\n\n A_ub = 
-np.identity(n + 2 * m)\n b_ub = np.zeros(n + 2 * m)\n A_eq = np.concatenate((np.concatenate((R, -R)), np.identity(2 * m)), axis=1)\n ray1 = R[:, i]\n ray2 = R[:, j]\n tar = 0.5 * ray1 + 0.5 * ray2\n eps_vector = np.array([epsilon] * (2 * m)) + np.random.uniform(-epsilon / 2, epsilon / 2, 2 * m)\n b_eq = np.concatenate((tar, -tar)) + eps_vector\n x0 = generate_BFS(R, i, j, eps_vector)\n c = np.concatenate((-np.ones(n), np.zeros(2 * m)))\n c[i] = 0\n c[j] = 0\n\n return A_ub, b_ub, A_eq, b_eq, c, x0\n\n\ndef setup_LP(R_indep, i, j):\n number_rays = R_indep.shape[1]\n\n A_ub = -np.identity(number_rays)\n b_ub = np.zeros(number_rays)\n A_eq = R_indep\n ray1 = R_indep[:, i]\n ray2 = R_indep[:, j]\n b_eq = 0.5 * ray1 + 0.5 * ray2\n c = -np.ones(number_rays)\n c[i] = 0\n c[j] = 0\n x0 = np.zeros(number_rays)\n x0[i] = 0.5\n x0[j] = 0.5\n\n return A_ub, b_ub, A_eq, b_eq, c, x0\n\n\ndef determine_adjacency(R, i, j, perturbed, tol=1e-10):\n if perturbed:\n A_ub, b_ub, A_eq, b_eq, c, x0 = setup_LP_perturbed(R, i, j, 1e-10)\n else:\n A_ub, b_ub, A_eq, b_eq, c, x0 = setup_LP(R, i, j)\n\n # KKT\n disable_lp = True\n if perturbed:\n ext_basis = np.nonzero(x0)[0]\n else:\n ext_basis = get_more_basis_columns(np.asarray(A_eq, dtype='float'), [i, j])\n KKT, status = kkt_check(c, np.asarray(A_eq, dtype='float'), x0, ext_basis)\n # DEBUG\n # status = 0\n\n if status == 0:\n return 1 if KKT else 0\n\n print(\"\\t\\t\\tKKT had non-zero exit status...\")\n # input(\"Waiting...\")\n disable_lp = False\n\n if not disable_lp:\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, method='revised simplex',\n options={'tol': 1e-12, 'maxiter': 500})\n\n if res.status == 1:\n print(\"Iteration limit %d reached, trying Blands pivot rule\" % (res.nit))\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, method='revised simplex',\n options={'tol': 1e-12, 'pivot': \"Bland\", 'maxiter': 20000})\n\n if res.status == 4:\n print(\"Numerical difficulties with revised simplex, trying interior point method instead\")\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, method='interior-point',\n options={'tol': 1e-12})\n\n if res.status != 0:\n print(\"Status %d\" % res.status)\n # input(\"Waiting...\")\n\n print(\"res.fun: %.2e res.nit: %d\" % (abs(res.fun), res.nit))\n if res.status != 0 or abs(res.fun) < tol:\n return 1\n\n return 0\n\n\ndef geometric_ray_adjacency(R, plus=[-1], minus=[-1], tol=1e-3, perturbed=False, verbose=True, remove_cycles=True):\n \"\"\"\n Returns r by r adjacency matrix of rays, given\n ray matrix R. 
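Two rays are declared adjacent when the LP built in setup_LP (express (ray_i + ray_j)/2 as a nonnegative combination of all rays while maximising the total weight on the other rays) attains an optimum of (near) zero, i.e. no third ray is needed. 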
Diagonal is 0, not 1.\n Calculated using LP adjacency test\n :param R: ray matrix (columns are generating rays)\n plus: indices of 'plus' columns\n minus: indices of 'minus' columns\n if plus/minus are not provided, find all adjacencies; otherwise only between each + and - pair\n :return: r by r adjacency matrix\n \"\"\"\n start = time()\n\n # with normalization\n R_normalized = normalize_columns(np.array(R, dtype='float'))\n R_indep = independent_rows(R_normalized)\n # without normalization\n # R_indep = independent_rows(R)\n\n LPs_done = 0\n # set default plus and minus\n if (len(plus) > 0 and plus[0] == -1):\n plus = [x for x in range(R_indep.shape[1])]\n if (len(minus) > 0 and minus[0] == -1):\n minus = [x for x in range(R_indep.shape[1])]\n\n number_rays = R_indep.shape[1]\n adjacency = np.zeros(shape=(number_rays, number_rays))\n\n disable_lp = not remove_cycles\n total = len(plus) * len(minus)\n\n print(\"\\n\\tLargest non-LP ray: %.2f\" % max(\n [np.linalg.norm(np.array(R[:, i], dtype='float')) for i in range(R.shape[1])]))\n print(\"\\tMax/min: %.3f\" % max(\n [abs(abs(np.array(R[:, i], dtype='float')).max() / np.min(\n abs(np.array(R[:, i], dtype='float'))[np.nonzero(R[:, i])])) for i in range(R.shape[1])]))\n print(\"\\tLargest LP ray: %.2f\" % max(\n [np.linalg.norm(np.array(R_indep[:, i], dtype='float')) for i in range(R_indep.shape[1])]))\n\n for ind1, i in enumerate(plus):\n for ind2, j in enumerate(minus):\n it = ind2 + ind1 * len(minus)\n if verbose:\n print(\"Doing KKT test %d of %d (%.2f percent done)\" % (it, total, it * 100 / total))\n\n adjacency[i, j] = determine_adjacency(R_indep, i, j, perturbed)\n\n end = time()\n print(\"Did %d LPs in %f seconds\" % (LPs_done, end - start))\n return adjacency\n\n\ndef reduce_column_norms(matrix):\n for i in range(matrix.shape[1]):\n norm = np.linalg.norm(np.array(matrix[:, i], dtype='float'))\n if norm > 2:\n matrix[:, i] /= int(np.floor(norm))\n return matrix\n\n\ndef remove_fake_ecms(R, network):\n metabolite_ids = [network.metabolites[i].id for i in network.external_metabolite_indices()]\n real_ecms = np.array([not fake_ecm(R[:, i], metabolite_ids) for i in range(R.shape[1])])\n return R[:, real_ecms]\n\n\ndef unsplit_metabolites(R, network):\n metabolite_ids = [network.metabolites[i].id for i in network.external_metabolite_indices()]\n res = []\n ids = []\n\n processed = {}\n for i in range(R.shape[0]):\n metabolite = metabolite_ids[i].replace(\"_in\", \"\").replace(\"_out\", \"\")\n if metabolite in processed:\n row = processed[metabolite]\n res[row] += R[i, :]\n else:\n res.append(R[i, :].tolist())\n processed[metabolite] = len(res) - 1\n ids.append(metabolite)\n\n # remove all-zero rays\n res = np.asarray(res)\n res = res[:, [sum(abs(res)) != 0][0]]\n\n return res, ids\n\n\ndef in_cone(R, tar):\n number_rays = R.shape[1]\n\n A_ub = -np.identity(number_rays)\n b_ub = np.zeros(number_rays)\n A_eq = R\n b_eq = tar\n c = -np.ones(number_rays)\n\n res = linprog(c, A_ub, b_ub, A_eq, b_eq, method='revised simplex', options={'tol': 1e-12})\n\n return A_ub, b_ub, A_eq, b_eq, c\n\n\ndef intersect_directly(R, internal_metabolites, network, perturbed=False, verbose=True, tol=1e-12):\n # rows are rays\n deleted = np.array([])\n it = 1\n internal = list(internal_metabolites)\n internal.sort()\n rows_removed_redund = 0\n\n while len(internal) > 0:\n i = internal[np.argmin(\n [np.sum(R[j - len(deleted[deleted < j]), :] > 0) * np.sum(R[j - len(deleted[deleted < j]), :] < 0) for j in\n internal])]\n # i = internal[len(internal)-1]\n 
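# Added explanatory note (not from the original author): the argmin above is a greedy\n        # pivot rule -- eliminating internal metabolite i via one double-description step costs\n        # roughly (#positive entries) * (#negative entries) adjacency LPs for its row, so the\n        # metabolite with the cheapest estimate is always removed first (cf. the prints below).\n        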
to_remove = i - len(deleted[deleted < i])\n        if verbose:\n            print(\"\\nIteration %d (internal metabolite = %d: %s) of %d\" % (it, to_remove, [m.id for m in network.metabolites][to_remove], len(internal_metabolites)))\n            print(\"Possible LP amounts for this step:\\n\" + \", \".join(np.sort(\n                [np.sum(R[j - len(deleted[deleted < j]), :] > 0) * np.sum(R[j - len(deleted[deleted < j]), :] < 0) for j\n                 in internal]).astype(str)))\n            print(\"Total: %d\" % sum(\n                [np.sum(R[j - len(deleted[deleted < j]), :] > 0) * np.sum(R[j - len(deleted[deleted < j]), :] < 0) for j\n                 in internal]))\n        it += 1\n        R, removed = eliminate_metabolite(R, i - len(deleted[deleted < i]), network, calculate_adjacency=True,\n                                          perturbed=perturbed)\n        rows_removed_redund += removed\n        deleted = np.append(deleted, i)\n        internal.remove(i)\n\n    # remove artificial rays introduced by splitting metabolites\n    # R = remove_fake_ecms(R, network)\n    R, ids = unsplit_metabolites(R, network)\n\n    if verbose:\n        print(\"\\n\\tRows removed by redund overall: %d\\n\" % rows_removed_redund)\n        if rows_removed_redund != 0:\n            pass\n            # input(\"Waiting...\")\n\n    return R, ids\n","sub_path":"ecmtool/intersect_directly.py","file_name":"intersect_directly.py","file_ext":"py","file_size_in_byte":20574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}{"seq_id":"207831032","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\n\n\ndef normalize_image(img):\n    ret, img = cv2.threshold(img, 100, 255, cv2.THRESH_BINARY)\n    img = center_image(img)\n    cv2.imwrite('test1.png', img)\n    img = padding_img(img, board=2, resize=(28, 28))\n    cv2.imwrite('test.png', img)\n    return img\n    \n\ndef padding_img(img, board=0, resize=None):\n    x, y = img.shape\n    max_shape = max(img.shape)\n    new_img = np.zeros((max_shape, max_shape), dtype='uint8')\n    d = abs(x - y) // 2\n    # Center the image on a square canvas\n    if x > y:\n        new_img[:, d: y + d] = img\n    elif x < y:\n        new_img[d: x + d, ] = img\n    else:\n        new_img = img\n    # Resize and add a border\n    if board != 0 and resize is not None:\n        new_size = (resize[0] - 2 * board, resize[1] - 2 * board)\n        if resize is not None:\n            resize_img = cv2.resize(new_img, new_size)\n            board_img = np.zeros(resize, dtype='uint8')\n            board_img[board: board + new_size[0], board: board + new_size[1]] = resize_img\n        return board_img\n\n    if board != 0:\n        board_img = np.zeros((max_shape + 2 * board, max_shape + 2 * board), dtype='uint8')\n        board_img[board: board + max_shape, board: board + max_shape] = new_img\n        return board_img\n    return new_img\n\n\ndef center_image(img):\n    img = img.copy()\n    _, ((t,l), (b, r)) = order_points(np.transpose(img.nonzero()))\n    img = img[t: b + 1, l: r + 1]\n    return img\n\n\ndef order_points(pts):\n    pts = np.array(pts).reshape((-1, 2))\n    rect1 = np.zeros((4, 2), dtype=\"int32\")\n    rect2 = np.zeros((2, 2), dtype=\"int32\")\n    s = pts.sum(axis=1)\n    rect1[0] = pts[np.argmin(s)]\n    rect1[2] = pts[np.argmax(s)]\n\n    diff = np.diff(pts, axis=1)\n    rect1[3] = pts[np.argmin(diff)]\n    rect1[1] = pts[np.argmax(diff)]\n\n    min_x = np.argmin(pts[:, 0])\n    min_y = np.argmin(pts[:, 1])\n\n    max_x = np.argmax(pts[:, 0])\n    max_y = np.argmax(pts[:, 1])\n\n    rect2[0] = (pts[min_x][0], pts[min_y][1])\n    rect2[1] = (pts[max_x][0], pts[max_y][1])\n    return rect1, rect2\n","sub_path":"fiximage.py","file_name":"fiximage.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}{"seq_id":"592504432","text":"from turtle import Turtle\nimport random\n\nCOLORS = [\"red\", \"orange\", 
\"yellow\", \"green\", \"blue\", \"purple\"]\nSTARTING_MOVE_DISTANCE = 5\nMOVE_INCREMENT = 10\n\n\nclass CarManager():\n def __init__(self):\n self.all_cars = []\n\n def create_car(self):\n if random.randint(1,6) == 1:\n new_car = Turtle()\n new_car.shape('square')\n new_car.penup()\n new_car.color(random.choice(COLORS))\n new_car.goto(300, random.randint(-250, 250))\n new_car.setheading(180)\n new_car.shapesize(1, 2)\n self.all_cars.append(new_car)\n\n def move(self, level):\n move_speed = STARTING_MOVE_DISTANCE + (level-1) * MOVE_INCREMENT\n for car in self.all_cars:\n if car.xcor() < -320:\n car.hideturtle()\n self.all_cars.remove(car)\n # self.all_cars.pop(self.all_cars.index(car))\n car.forward(move_speed)\n","sub_path":"Turtle-crossing/car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"546610381","text":"__author__ = \"Ettore Forigo\"\n__license__ = \"GPL\"\n__date__ = \"12/01/2017\"\n__version__ = \"1\"\n__status__ = \"Development\"\n\n\ndef twos_comp(x, nbits):\n\tmax_negative = -int('1'+('0' * (nbits - 1)), 2)\n\tmax_positive = -max_negative-1\n\n\tif not (max_negative <= x <= max_positive):\n\t\traise ValueError('Number not included in bit range')\n\n\tif x < 0:\n\t\tcomplement = 2**nbits\n\telse:\n\t\tcomplement = 0\n\n\treturn '0b'+bin(complement + x)[2:].zfill(nbits)\n","sub_path":"twos_comp.py","file_name":"twos_comp.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"529640718","text":"class Queue():\n def __init__(self):\n self.queue = []\n def enqueue(self, value):\n self.queue.append(value)\n def dequeue(self):\n if self.size() > 0:\n return self.queue.pop(0)\n else:\n return None\n def size(self):\n return len(self.queue)\n\n\ndef earliest_ancestor(ancestors, starting_node):\n # ---------- Building the relatives dictionary ----------\n # Do graph here instead\n\n # Set a blank dict for the relatives\n relatives = {}\n\n # Loop through the parameter ancestors\n for i in ancestors:\n parent = i[0] # add vertex\n child = i[1]\n\n # If the child isn't in the relatives dict yet...\n if child not in relatives:\n # Give the child a spot with an empty list\n relatives[child] = [] # add edge\n # Add the parent to the list of relatives\n relatives[child].append(parent)\n\n # If the starting node isn't in the relatives dict...\n if starting_node not in relatives:\n # The child has no ancestors\n return -1\n\n # ---------- Setting up the search ----------\n\n # Set a blank list for the paths\n paths = []\n\n # Create an empty Queue\n q = Queue()\n \n # Add the parameter starting node to the queue\n q.enqueue([starting_node])\n\n # While the queue is not empty...\n while q.size() > 0:\n # Dequeue the first path\n path = q.dequeue()\n # Grab the last vertex from the PATH\n last_vertex = path[-1]\n\n # Check if this node is in the relatives dict\n if last_vertex in relatives:\n # Add the ancestors for this node to the queue\n for ancestor in relatives[last_vertex]:\n # Add the current ancestor to the path\n new_path = path + [ancestor]\n # Append the ancestor to the back of the path\n q.enqueue(new_path)\n # Once there are no more \"last_vertex\"s in the relatives dict\n # We're at the end of the path, so we can log what we've traversed\n else:\n # Copy the current path as is\n new_path = path[:]\n # Store it in the paths list\n paths.append(new_path)\n 
\n    # ---------- Calculate the earliest ancestor ----------\n\n    # Determine the max length of every possible path in paths\n    max_length = max([len(path) for path in paths])\n\n    # Return the smallest number from the list of paths matching the max length\n    return min([path[-1] for path in paths if len(path) == max_length])\n\n\n# For testing purposes:\nancestors = [(1, 3), (2, 3), (3, 6), (5, 6), (5, 7), (4, 5), (4, 8), (8, 9), (11, 8), (10, 1)]\nprint(earliest_ancestor(ancestors, 1))","sub_path":"projects/ancestor/ancestor.py","file_name":"ancestor.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}{"seq_id":"469280271","text":"import os\nimport argparse\n\ndef arguments():\n    parser = argparse.ArgumentParser(description=\"Hello.......World\")\n    parser.add_argument('--a',required=True)\n    parser.add_argument('--b',required=True)\n    return parser.parse_args()\n\ndef add(a,b):\n    print (\"result:\", int(a)+int(b))\n\nif __name__==\"__main__\":\n    print (\"addition:\")\n    args = arguments()\n    x= args.a\n    y= args.b\n    add(x,y)\n","sub_path":"add/addition.py","file_name":"addition.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}{"seq_id":"164668019","text":"#!/usr/bin/env python3\n\nimport multiprocessing\nimport os\nimport shutil\nimport subprocess\n\n\nIMAGE = \"spicy-fuzz\"\nOUT = os.getcwd() + \"/spicy-fuzz\"\n\nMAX_TOTAL_TIME = 600\n\ndef update_sources(uri: str, dest: str):\n    if not os.path.isdir(dest):\n        subprocess.check_call([\"git\", \"clone\", \"--recursive\", uri, dest])\n    else:\n        subprocess.check_call([\"git\", \"pull\"], cwd=dest)\n        subprocess.check_call([\"git\", \"submodule\", \"update\", \"--recursive\", \"--init\"], cwd=dest)\n\n    subprocess.call([\"git\", \"reset\", \"HEAD\", \"--hard\"], cwd=dest)\n\n# Build base Docker image.\nsubprocess.check_call([\"docker\", \"build\", \"-t\", IMAGE, \".\"])\n\n# Check out or update Spicy and spicy-analyzers sources.\nupdate_sources(\"https://github.com/zeek/spicy\", \"spicy\")\nupdate_sources(\"https://github.com/zeek/spicy-analyzers\", \"spicy/zeek/spicy-analyzers\")\n\n# Update Spicy for fuzzing.\nif not os.path.isdir(\"spicy/ci/fuzz\"):\n    os.mkdir(\"spicy/ci/fuzz\")\nfor f in [\"build.sh\", \"Dockerfile\", \"fuzz.cc\", \"run.py\", \"CMakeLists.txt\"]:\n    shutil.copy(f, \"spicy/ci/fuzz/%s\" % f)\n\nwith open(\"spicy/CMakeLists.txt\", \"a\") as cmakelists:\n    cmakelists.write(\"add_subdirectory(ci/fuzz)\")\n\n# Create fuzzing binaries.\ntry:\n    os.mkdir(OUT)\nexcept FileExistsError:\n    pass\n\nsubprocess.check_call([\"docker\", \"run\", \"--rm\",\n                       \"--privileged\",\n                       \"-v\", OUT+\":/out\", \"-e\", \"OUT=/out\",\n                       \"-v\", os.getcwd() + \"/spicy:/work\",\n                       \"-e\", \"CXX=clang++-12\",\n                       \"-e\", \"CC=clang-12\",\n                       \"-e\", \"SANITIZER=address\",\n                       IMAGE,\n                       \"/work/ci/fuzz/build.sh\",\n                       ])\n\n# Run individual fuzzers.\nfuzzers = {\n    \"dhcp\": [\"Message\"],\n    \"dns\": [\"Message\"],\n    \"http\": [\"HTTP::Request\", \"HTTP::Requests\", \"HTTP::Reply\", \"HTTP::Replies\"],\n    \"ipsec\": [\"IPSecPacketUDP\", \"IPSecPacketsTCP\", \"IPSecIKE\"],\n    \"tftp\": [\"Packet\"],\n    \"pe\": [\"ImageFile\"],\n    \"PNG\": [\"File\"],\n    \"wireguard\": [\"WireGuardPacket\"],\n}\n\nfor grammar, parsers in fuzzers.items():\n    for parser in parsers:\n        subprocess.check_call([\"docker\", \"run\", \"--rm\",\n                               \"-v\", OUT + \":/work\",\n                               \"-e\", \"SPICY_FUZZ_PARSER=\" + parser,\n                               \"-e\", 
\"ASAN_OPTIONS=detect_leaks=0\",\n IMAGE,\n *\"/work/fuzz-{grammar} -timeout={max_total_time} -max_total_time={max_total_time} -jobs={nproc} -create_missing_dirs=1 -artifact_prefix=/work/corpus-fuzz-{grammar}-{parser}/artifacts/ /work/corpus-fuzz-{grammar}-{parser}\".format(\n grammar=grammar,\n parser=parser,\n max_total_time=MAX_TOTAL_TIME,\n nproc=multiprocessing.cpu_count()).split(),\n ])\n","sub_path":"fuzz/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"437785692","text":"import tensorflow as tf\nimport numpy as np\n\nfrom utils.tf_util import tf_op\n\ndef activate(weights, func_name, name=None):\n \"\"\" function \"\"\"\n func_name = func_name.lower()\n if func_name == 'sigmoid':\n return tf.nn.sigmoid(weights, name=name)\n elif func_name == 'softmax':\n return tf.nn.softmax(weights, name=name)\n elif func_name == 'relu':\n return tf.nn.relu(weights, name=name)\n elif func_name == 'tanh':\n return tf.nn.tanh(weights, name=name)\n elif func_name == 'elu':\n return tf.nn.elu(weights, name=name)\n elif func_name == 'none':\n return weights\n else:\n return tf.nn.relu(weights, name=name)\n\nclass MLP:\n \"\"\"\n 生成MLP网络, in_placeholder 为输入的inplaceholder, layer_sizes为每层的维数,activation_names为对应每层的activation\n in_placeholder=tf.placeholder(tf.float32, [None, 42])\n layer_sizes=[10, 1], activation_names=['relu', 'none']\n \"\"\"\n def __init__(self, sess, in_placeholder, layer_sizes, activation_names, initial_val_dict=None):\n tf.assert_equal(tf.rank(in_placeholder), 2, message='input rank should be 2')\n\n self.input_dimension = in_placeholder.shape.as_list()[1]\n self.output_dimension = layer_sizes[-1]\n self.input_tensor = in_placeholder\n self.sess = sess\n self.params = {}\n\n # build network\n layer_sizes.insert(0, self.input_dimension)\n activation_names.insert(0, 'none')\n\n layers_n = len(layer_sizes)\n cur_in = in_placeholder #当前层的输入\n for ind in range(layers_n - 1):\n in_size = layer_sizes[ind]\n out_size = layer_sizes[ind + 1]\n w_name = \"W%d\" % (ind + 1)\n b_name = \"b%d\" % (ind + 1)\n\n if initial_val_dict is None or w_name not in initial_val_dict:\n w_initializer = tf.initializers.truncated_normal(stddev=0.1 / np.sqrt(float(in_size)))\n #w_initial_val = tf.truncated_normal([in_size, out_size], stddev=0.1 / np.sqrt(float(in_size)))\n else:\n w_initializer = initial_val_dict[w_name]\n\n if initial_val_dict is None or b_name not in initial_val_dict:\n b_initializer = tf.initializers.truncated_normal(stddev=0.1 / np.sqrt(float(in_size)))\n #b_initial_val = tf.truncated_normal([1, out_size], stddev=0.1 / np.sqrt(float(in_size)))\n else:\n b_initializer = initial_val_dict[b_name]\n\n w = tf.get_variable(w_name, shape=[in_size, out_size], dtype=tf.float32, initializer=w_initializer)\n b = tf.get_variable(b_name, shape=[1, out_size], dtype=tf.float32, initializer=b_initializer)\n\n for i in range(in_size):\n tensor = tf.gather(w, i, axis=0)\n tf_op.variable_summaries(tensor, '{}_{}'.format(w_name, i))\n tf_op.variable_summaries(w, '{}'.format(w_name))\n tf_op.variable_summaries(b, '{}'.format(b_name))\n\n #w = tf.Variable(w_initial_val, name=w_name, dtype=tf.float32)\n #b = tf.Variable(b_initial_val, name=b_name, dtype=tf.float32)\n z = tf.add(tf.matmul(cur_in, w), b)\n cur_out = activate(z, func_name=activation_names[ind + 1])\n self.params[w_name] = w\n self.params[b_name] = b\n cur_in = cur_out\n self.output_tensor = cur_out\n\n @property\n def 
param_tensor(self):\n        return self.params.values()\n\n    def get_output_value(self, input_value):\n        feed_dict = {\n            self.input_tensor: input_value,\n        }\n        output = self.sess.run(self.output_tensor, feed_dict)\n        return output\n\n    def get_gradients(self, loss):\n        trainable_variables = list(self.params.values())\n        grads = tf.gradients(loss, trainable_variables)\n        grads = list(zip(grads, trainable_variables))\n        return grads\n\ndef test():\n    with tf.Session() as sess:\n        ipt = tf.placeholder(tf.float32, shape=(None, 2))\n        mlp = MLP(sess, ipt, [1], ['none'])\n\n        sess.run(tf.global_variables_initializer())\n\n        output = mlp.get_output_value(np.random.rand(1, 2))\n        print(output)\n\nif __name__ == '__main__':\n    test()\n","sub_path":"models/neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}{"seq_id":"79190482","text":"#\n\"\"\"\nfiltergraph:\n\n(LiveThread:livethread) --> {InfoFrameFilter:live_out_filter}\n\"\"\"\n#\n\"\"\"\nImport the valkka level 1 API:\n\"\"\"\nimport time\nfrom valkka.core import *\n\n\"\"\"\nCreate a starting point for a FrameFilter chain:\n\"\"\"\nlive_out_filter = InfoFrameFilter(\"live_out_filter\")\n\n\"\"\"\nThis is the \"entry point\" where we receive all the frames. \n\nInfoFrameFilter does nothing fancy - it just prints out the frames it receives. \n\nHowever, as you will learn during this tutorial, FrameFilters can do a lot of stuff. You can chain them together. They can be used to fork and copy the stream into complex graphs, etc.\n \nNext we need a thread that feeds the frames into our FrameFilter, so we instantiate a LiveThread:\n\"\"\"\nlivethread = LiveThread(\"livethread\")\n\n\"\"\"\nWe also need a context describing the connection to an IP camera:\n\"\"\"\nctx = LiveConnectionContext(LiveConnectionType_rtsp, \"rtsp://admin:nordic12345@192.168.1.41\", 1, live_out_filter)\n\n\"\"\"\nThe first parameter defines the device type, which in this case is an IP camera using the rtsp protocol. Note that we include the \"entry point\" live_out_filter. 
The integer parameter \"1\" is the slot number - it will be discussed in detail later on in this tutorial.\n\nFinally, we can start streaming frames from the IP camera:\n\"\"\"\nlivethread.startCall()\nlivethread.registerStreamCall(ctx)\nlivethread.playStreamCall(ctx)\ntime.sleep(5)\nlivethread.stopCall()\nprint(\"bye\")\n\n\"\"\"\nThe output looks like this:\n\n::\n\n InfoFrameFilter: live_out_filter start dump>> \n InfoFrameFilter: FRAME : \n InfoFrameFilter: PAYLOAD : []\n InfoFrameFilter: timediff: 0\n InfoFrameFilter: live_out_filter <> \n InfoFrameFilter: FRAME : \n InfoFrameFilter: PAYLOAD : [0 0 0 1 103 100 0 42 173 132 1 12 32 8 97 0 67 8 2 24 ]\n InfoFrameFilter: timediff: 0\n InfoFrameFilter: live_out_filter <> \n InfoFrameFilter: FRAME : \n InfoFrameFilter: PAYLOAD : [0 0 0 1 104 238 49 178 27 ]\n InfoFrameFilter: timediff: -1\n InfoFrameFilter: live_out_filter <\"\"\"\n","sub_path":"api_level_1/tutorial/lesson_1_a.py","file_name":"lesson_1_a.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"64259557","text":"import DSGRN\nimport subprocess\nimport graphviz\n\n\nwith open('testnetwork.txt','w') as f:\n f.write('S : (S) \\nMD : (S) : E\\nRp : (~MD) : E\\nEE : (MD + EE)(~Rp) : E')\n\n# subprocess.call('mpiexec -np 4 ../DSGRN/software/Signatures/bin/Signatures testnetwork.txt testnetwork.db',shell=True)\n\nFP_OFF= {\"EE\":[0,0],\"Rp\":[1,1]}\nFP_ON={\"EE\":[1,8],\"Rp\":[0,0]}\n\n\ndatabase = DSGRN.Database('testnetwork.db')\nQ = DSGRN.MonostableFixedPointQuery(database, FP_OFF).matches()\nP = DSGRN.MonostableFixedPointQuery(database, FP_ON).matches()\nB = DSGRN.DoubleFixedPointQuery(database, FP_OFF, FP_ON).matches()\nsingle_gene_query = DSGRN.SingleGeneQuery(database, \"S\")\nQQ = DSGRN.SingleFixedPointQuery(database, FP_OFF).matches()\nPP = DSGRN.SingleFixedPointQuery(database, FP_ON).matches()\n\nfor n in range(single_gene_query.number_of_reduced_parameters()):\n graph = single_gene_query(n)\n graph.color = lambda v : \"green\" if graph.mgi(v) in Q else (\"red\" if graph.mgi(v) in P else ( \"yellow\" if graph.mgi(v) in B else (\"darkgreen\" if graph.mgi(v) in QQ else (\"orange\" if graph.mgi(v) in PP else \"white\"))))\n graphstr = 'digraph {' + \\\n '\\n'.join([ 'X' + str(v) + '[label=\"' + graph.label(v) + '\";style=\"filled\";fillcolor=\"' + graph.color(v) + '\"];' for v in graph.vertices ]) + \\\n '\\n' + '\\n'.join([ 'X' + str(u) + \" -> \" + 'X' + str(v) + ';' for (u, v) in graph.edges ]) + \\\n '\\n' + '}\\n'\n with open(\"testgraph{:02d}.gv\".format(n),\"w\") as f:\n f.write(graphstr)\n # for v in graph.vertices:\n # if graph.color(v) == \"white\":\n # parametergraph = DSGRN.ParameterGraph(DSGRN.Network('testnetwork.txt'))\n # parameter = parametergraph.parameter(single_gene_query.database.full_parameter_index(n,v,database.network.index(\"S\")))\n # domaingraph = DSGRN.DomainGraph(parameter)\n # morsedecomposition = DSGRN.MorseDecomposition(domaingraph.digraph())\n # morsegraph = DSGRN.MorseGraph()\n # morsegraph.assign(domaingraph, morsedecomposition)\n # break\n # if n == 10:\n # for v in graph.vertices:\n # parametergraph = DSGRN.ParameterGraph(DSGRN.Network('testnetwork.txt'))\n # parameter = parametergraph.parameter(single_gene_query.database.full_parameter_index(n,v,database.network.index(\"S\")))\n # domaingraph = DSGRN.DomainGraph(parameter)\n # morsedecomposition = DSGRN.MorseDecomposition(domaingraph.digraph())\n # morsegraph = DSGRN.MorseGraph()\n # 
morsegraph.assign(domaingraph, morsedecomposition)\n    #         with open(\"testmorsegraph{:02d}_{:02d}.gv\".format(n,v),\"w\") as f:\n    #             f.write(morsegraph.graphviz())\n    # with open(\"testmorsegraph{:02d}.gv\".format(n),\"w\") as f:\n    #     f.write(morsegraph.graphviz())\n\n\n\n","sub_path":"analysis/testhysteresis.py","file_name":"testhysteresis.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}{"seq_id":"102655555","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.9-x86_64/egg/fedservice/entity_statement/cache.py\n# Compiled at: 2019-12-09 11:21:04\n# Size of source mod 2**32: 1048 bytes\nimport logging\nfrom cryptojwt.jwt import utc_time_sans_frac\nlogger = logging.getLogger(__name__)\n\nclass ESCache(object):\n\n    def __init__(self, allowed_delta=300):\n        self._db = {}\n        self.allowed_delta = allowed_delta\n\n    def __setitem__(self, key, value):\n        self._db[key] = value\n\n    def __getitem__(self, item):\n        try:\n            statement = self._db[item]\n        except KeyError:\n            return\n        else:\n            if isinstance(statement, dict):\n                _now = utc_time_sans_frac()\n                if _now < statement['exp'] - self.allowed_delta:\n                    return statement\n                del self._db[item]\n                return\n            else:\n                return statement\n\n    def __delitem__(self, key):\n        del self._db[key]\n\n    def __contains__(self, item):\n        _val = self[item]\n        if _val:\n            return True\n        return False","sub_path":"pycfiles/fedservice-0.6.0-py3.7/cache.cpython-37.py","file_name":"cache.cpython-37.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}{"seq_id":"204207640","text":"# Same idea as in cpp version.\nclass Solution(object):\n    _BASE = ord('Z') - ord('A') + 1\n\n    def convertToTitle(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: str\n        \"\"\"\n        assert n is not None\n        assert n > 0\n        \n        title = ''\n        while n > 0:\n            title += chr(ord('A') + (n - 1) % self._BASE)\n            n = (n - 1) // self._BASE  # floor division, so n stays an int on Python 3\n        \n        # Reverse a string.\n        return title[::-1]\n","sub_path":"py/168_ExcelSheetColumnTitle.py","file_name":"168_ExcelSheetColumnTitle.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}{"seq_id":"451341337","text":"import os\nimport shutil\nimport tensorflow as tf\nimport numpy as np\nimport sys\nfrom numpy import sqrt\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport matplotlib.pyplot as plt\nfrom utils import *\nfrom sklearn.metrics import confusion_matrix\n\nMODEL_NAME = os.path.basename(os.path.splitext(__file__)[0])\nlogger = get_logger(MODEL_NAME)\nSAVED_MODEL_PATH = \"{}/\".format(MODEL_NAME)\ntf.set_random_seed(deterministic_seed)\nnp.set_printoptions(threshold=np.nan)\n\n\ndef convert_label_to_simplex_point(label, num_classes=10):\n    \"\"\"\n    Function assumes labels go from 0 to num_classes - 1\n    \"\"\"\n    n = num_classes - 1\n    if label == n:\n        return np.ones(n) * (1 + sqrt(1 + n)) / n\n    vec = np.zeros(n)\n    vec[label] = 1\n    return vec\n\n\ndef simplex_to_one_hot(point, num_classes=10):\n    a = np.zeros(num_classes)\n    if 1.0 not in point:\n        a[num_classes - 1] = 1\n    else:\n        a[np.argmax(point)] = 1\n    return a\n\n\ndef get_closest_simplex_point(point):\n    n = len(point)\n    entire_simplex = np.array([convert_label_to_simplex_point(label) for label in range(n + 1)])\n    differences = []\n    for simplex_point in 
entire_simplex:\n differences.append(np.linalg.norm(simplex_point - point))\n index = np.argmin(differences)\n return entire_simplex[index]\n\n\ndata = input_data.read_data_sets('MNIST-data', one_hot=True, seed=deterministic_seed)\n\ndata.train.simplex = np.array([convert_label_to_simplex_point(label.argmax()) for label in data.train.labels])\ndata.test.simplex = np.array([convert_label_to_simplex_point(label.argmax()) for label in data.test.labels])\ndata.test.cls = np.array([label.argmax() for label in data.test.labels])\ndata.train.cls = np.array([label.argmax() for label in data.train.labels])\n\nx = tf.placeholder(tf.float32, [None, img_size_flat], name=\"image\")\ny_true = tf.placeholder(tf.float32, [None, num_classes], name=\"y_true\")\ny_true_simplex = tf.placeholder(tf.float32, [None, num_classes - 1], name=\"y_simplex\")\n# First convolutional layer\nx_image = tf.reshape(x, [-1, 28, 28, 1])\n\nweights1 = get_weights([5, 5, 1, 32])\nbiases1 = get_bias([32])\nh_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, weights1, strides=[1, 1, 1, 1], padding='SAME') + biases1)\nh_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n# Second convolutional layer\nweights2 = get_weights([5, 5, 32, 64])\nbiases2 = get_bias([64])\n\nh_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, weights2, strides=[1, 1, 1, 1], padding='SAME') + biases2)\nh_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n# h_pool2 is now \"picture\" of size 7 * 7\n# First fully connected layer\nweights_fully_connected1 = get_weights([7 * 7 * 64, 1024])\nbiases_fully_connected1 = get_bias([1024])\n\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\nh_fully_connected1 = tf.nn.relu(tf.matmul(h_pool2_flat, weights_fully_connected1) + biases_fully_connected1)\n\n# Second fully connected\nweights_fully_connected2 = get_weights([1024, 1024])\nbiases_fully_connected2 = get_bias([1024])\n\nh_fully_connected2 = tf.matmul(h_fully_connected1, weights_fully_connected2) + biases_fully_connected2\n\n# Third fully connected layer\nweights_fully_connected3 = get_weights([1024, num_classes - 1])\nbiases_fully_connected3 = get_bias([num_classes - 1])\n\nh_fully_connected3 = tf.matmul(h_fully_connected2, weights_fully_connected3) + biases_fully_connected3\n\n# Finally, simplex!\ny_conv = h_fully_connected3\ndistance = tf.reduce_sum(tf.norm(y_true_simplex - y_conv, ord='euclidean', axis=1))\n\noptimizer = tf.train.AdamOptimizer(1e-4).minimize(distance)\n\nentire_simplex = np.array([convert_label_to_simplex_point(label) for label in range(num_classes)])\nentire_tf_simplex = tf.constant(entire_simplex, dtype=tf.float32)\ndifferences = tf.map_fn(lambda point: tf.norm(point - entire_tf_simplex, ord='euclidean', axis=1), y_conv)\nclosest_point_index = tf.argmin(differences, 1)\nclosest_point_on_simplex_to_y_conv = tf.map_fn(lambda single_index: entire_tf_simplex[single_index],\n closest_point_index, dtype=tf.float32)\ncorrect_prediction = tf.equal(y_true_simplex, closest_point_on_simplex_to_y_conv)\ncorrect_prediction = tf.map_fn(lambda truth: tf.reduce_all(truth), correct_prediction)\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsaver = tf.train.Saver()\n\ncurrent_batch = 0\nnumber_of_iterations = 10000\nwith tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n if len(sys.argv) != 2:\n raise ValueError(\"Wrong number of parameters\")\n\n if sys.argv[1] == 
\"train\":\n shutil.rmtree(SAVED_MODEL_PATH, ignore_errors=True)\n for i in range(number_of_iterations):\n current_batch += 50\n current_batch %= 55000\n batch_images = data.train.images[current_batch:current_batch + 50]\n batch_simplex = data.train.simplex[current_batch:current_batch + 50]\n if i % 100 == 0:\n train_accuracy = accuracy.eval(feed_dict={x: batch_images, y_true_simplex: batch_simplex})\n print(\"step %d, training accuracy: %.4f\" % (i, train_accuracy))\n if i % 1000 == 0:\n saver.save(sess=sess, save_path=SAVED_MODEL_PATH)\n optimizer.run(feed_dict={x: batch_images, y_true_simplex: batch_simplex})\n saver.save(sess=sess, save_path=SAVED_MODEL_PATH)\n\n elif sys.argv[1] == \"extract_layers\":\n saver.restore(sess=sess, save_path=SAVED_MODEL_PATH)\n\n flatten_h_pool1 = tf.reshape(h_pool1, shape=[-1, 14 * 14 * 32])\n flatten_h_pool2 = tf.reshape(h_pool2, shape=[-1, 7 * 7 * 64])\n h_fully_connected1 = tf.reshape(h_fully_connected1, shape=[-1, 1024])\n h_fully_connected2 = tf.reshape(h_fully_connected2, shape=[-1, 1024])\n h_fully_connected3 = tf.reshape(h_fully_connected3, shape=[-1, 9])\n batch_size = 200\n layer_file_names = [\"simplex_3variation_1.txt\", \"simplex_3variation_2.txt\", \"simplex_3variation_3.txt\",\n \"simplex_3variation_4.txt\", \"simplex_3variation_5.txt\"]\n file_handles = [open(layer_filename, \"a\") for layer_filename in layer_file_names]\n for i in range(len(data.train.images) // batch_size):\n dict_to_feed = {x: data.train.images[batch_size * i: batch_size * i + batch_size],\n y_true: data.train.labels[batch_size * i: batch_size * i + batch_size]}\n\n np.savetxt(file_handles[0], flatten_h_pool1.eval(feed_dict=dict_to_feed), fmt=\"%f\")\n np.savetxt(file_handles[1], flatten_h_pool2.eval(feed_dict=dict_to_feed), fmt=\"%f\")\n np.savetxt(file_handles[2], h_fully_connected1.eval(feed_dict=dict_to_feed), fmt=\"%f\")\n np.savetxt(file_handles[3], h_fully_connected2.eval(feed_dict=dict_to_feed), fmt=\"%f\")\n np.savetxt(file_handles[4], h_fully_connected3.eval(feed_dict=dict_to_feed), fmt=\"%f\")\n if i % 10 == 0:\n print(\"Current iteration: {}\".format(i))\n [file.close() for file in file_handles]\n elif sys.argv[1] == \"restore\":\n saver.restore(sess=sess, save_path=SAVED_MODEL_PATH)\n all_points = np.zeros(len(data.test.images))\n batch = 100\n for j in range(len(all_points) // batch):\n tmp = np.array(data.test.simplex[batch * j:batch * j + batch])\n all_points[batch * j: batch * j + batch] = closest_point_index.eval(\n feed_dict={x: np.array(data.test.images[batch * j:batch * j + batch]),\n y_true_simplex: tmp})\n cm = confusion_matrix(y_true=data.test.cls, y_pred=all_points)\n\n all_precisions = []\n all_recalls = []\n for i in range(num_classes):\n precision = get_precision(i, cm)\n recall = get_recall(i, cm)\n all_precisions.append(precision)\n all_recalls.append(recall)\n print(\"Precision for {}: %.3f\".format(i) % precision)\n print(\"Recall for {}: %.3f\".format(i) % recall)\n print(\"Model accuracy: {}\".format(get_model_accuracy(all_points, data.test.cls)))\n else:\n raise ValueError(\"Bad parameter given.\")\n","sub_path":"basic/simplex_variation_3.py","file_name":"simplex_variation_3.py","file_ext":"py","file_size_in_byte":8267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"635876651","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport sys\nimport os\n \ndef gen_mod_thread(feat_def,str_in):\n str_out=str_in\n feat_name=feat_def['name']\n\n #Support for the thrad\n 
__TAG_THREAD_VAR_DEC__=\"\"\n __TAG_THREAD_FUN_DEC__=\"\"\n __TAG_THREAD_FUN_VIRTUAL_DEC__=\"\"\n __TAG_THREAD_VAR_CONSTRUCT__=\"\"\n __TAG_THREAD_VAR_DESTRUCT__=\"\"\n __TAG_MUTEX_FUN_CALLBACK_IMP__=\"\"\n __TAG_THREAD_FUN_IMP__=\"\"\n __TAG_THREAD_INIT__=\"\"\n __TAG_THREAD_DESTORY__=\"\"\n\n if feat_def.has_key('thread'):\n for item in feat_def['thread']:\n __TAG_THREAD_VAR_DEC__+=\" os::Thread * m_pThread%s;\\r\\n\"%(item['name'])\n __TAG_THREAD_FUN_DEC__+=\"virtual void Thread%sCallBack();\"%(item['name'])\n __TAG_THREAD_FUN_VIRTUAL_DEC__+=\"virtual void Thread%sCallBack()=0;\"%(item['name'])\n __TAG_THREAD_VAR_CONSTRUCT__+=\"\"\"m_pThread%s = new os::Thread(\"ModXxx\");\\r\\n\"\"\"%(item['name']);\n __TAG_THREAD_VAR_DESTRUCT__+=\"\"\"\n if(NULL != m_pThread%s)\n {\n delete m_pThread%s ;\n }\n \"\"\"%(item['name'],item['name'])\n __TAG_MUTEX_FUN_CALLBACK_IMP__+=\"\"\"\n extern \"C\" void* %s_fun(void *pthis)\n {\n AutoModXxx *pThis=(AutoModXxx *)pthis;\n if(NULL != pThis)\n pThis->Thread%sCallBack();\n return 0;\n };\n \"\"\"%(item['name'].lower(),item['name'])\n __TAG_THREAD_FUN_IMP__+=\"\"\"\nvoid ModXxx::Thread%sCallBack()\n{\n \n};\n\"\"\"%(item['name'])\n __TAG_THREAD_INIT__+=\"\"\"\n if (m_pThread%s->Spawn(%s_fun,(void*)this,os::priNormal,%s) != SDB_RET_SUCCED)\n {\n Log(utl::LOGGER_LEVEL_ERRO,(char *)m_strModName.c_str(),(char *)__FUNCTION__,\"create %s_timer task failure.\");\n return SDB_RET_FAILED;\n }; \n \"\"\"%(item['name'],item['name'].lower(),item['stacksize'],item['name'].lower())\n __TAG_THREAD_DESTORY__+=\"\"\"\n if(NULL != m_pThread%s)\n m_pThread%s->Delete();\n \"\"\"%(item['name'],item['name'])\n \n \n \n str_out=str_out.replace(\"__TAG_THREAD_VAR_DEC__\",__TAG_THREAD_VAR_DEC__)\n str_out=str_out.replace(\"__TAG_THREAD_FUN_DEC__\",__TAG_THREAD_FUN_DEC__)\n str_out=str_out.replace(\"__TAG_THREAD_FUN_VIRTUAL_DEC__\",__TAG_THREAD_FUN_VIRTUAL_DEC__) \n str_out=str_out.replace(\"__TAG_THREAD_VAR_CONSTRUCT__\",__TAG_THREAD_VAR_CONSTRUCT__)\n str_out=str_out.replace(\"__TAG_THREAD_VAR_DESTRUCT__\",__TAG_THREAD_VAR_DESTRUCT__)\n str_out=str_out.replace(\"__TAG_MUTEX_FUN_CALLBACK_IMP__\",__TAG_MUTEX_FUN_CALLBACK_IMP__)\n str_out=str_out.replace(\"__TAG_THREAD_FUN_IMP__\",__TAG_THREAD_FUN_IMP__)\n str_out=str_out.replace(\"__TAG_THREAD_INIT__\",__TAG_THREAD_INIT__)\n str_out=str_out.replace(\"__TAG_THREAD_DESTORY__\",__TAG_THREAD_DESTORY__) \n return str_out\n \n\n","sub_path":"scripts/gen_mod_thread.py","file_name":"gen_mod_thread.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"25623633","text":"def format_decorator(format_func, *args):\n def format_wrapper(*args):\n formatted_rows = []\n if args:\n for row in args[0]:\n formatted_rows.append(format_func(row))\n return formatted_rows\n\n return format_wrapper\n\n\n@format_decorator\ndef format_task(row):\n item = f\"Item: {row[0]:<4}\"\n event_type = f\"{row[1]}\"\n task = f\"{row[2]:<100}\"\n date_set = f\"Date: {row[3]}\"\n days_to_complete = f\"Days to complete: {row[5]}\"\n is_complete = f\"Completed: {row[6]:<5}\"\n return f\"{item} {date_set} {event_type}: {task} [{days_to_complete}, {is_complete}]\"\n\n\n@format_decorator\ndef format_note(row):\n item = f\"Item: {row[0]:<4}\"\n event_type = f\"{row[1]}\"\n note = f\"{row[2]}\"\n date_set = f\"Date: {row[3]}\"\n return f\"{item} {date_set} {event_type}: 
{note}\"\n","sub_path":"time_management/format_decorators.py","file_name":"format_decorators.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"143410945","text":"# 寻找最佳矩形柱长宽Python程序。\n# 目标相位为\"Phix.xlsx\"和\"Phiy.xlsx\"。扫描得到的数据为\"Phi.xlsx\"。\n# 程序执行结果保存在\"Rect.xlsx\"\n# %% 导入Python程序依赖包\nimport pandas as pd\nimport numpy as np\n# %% 读取数据\nx0 = pd.read_excel('Phix.xlsx')\ny0 = pd.read_excel('Phiy.xlsx')\nphix = pd.read_excel('Phi.xlsx')\nphix = np.array(phix)\nx0 = np.array(x0)\ny0 = np.array(y0)\nphiy = phix.T\n# %% 遍历目标相位,在扫描相位中找最接近的尺寸输出\ndata = []\na = []\nb = []\n\nfor w in range(5):\n for z in range(20):\n FoM = np.zeros((51, 51))\n for m in range(51):\n for n in range(51):\n FoM[m, n] = abs(phix[m, n]-x0[w, z])+abs(phiy[m, n]-y0[w, z])\n# minnum = min(min(num) for num in FoM)\n\n min = FoM[0][0]\n\n p = 0\n q = 0\n for x in range(51):\n for y in range(51):\n if FoM[x, y] < min:\n p = x\n q = y\n min = FoM[x, y]\n a.append(p)\n b.append(q)\n data.append(min)\n# %% 保存执行结果到\"Rect.xlsx\"\nxl = np.array(b)\nx0length = (xl)*0.01+0.1\nyl = np.array(a)\ny0length = (yl)*0.01+0.1\ndoc = pd.DataFrame([data, x0length, y0length]).T\ndoc.columns = ['min', 'RECTxength', 'RECTyength']\ndoc.to_excel('Rect.xlsx')\n","sub_path":"Lumerical/论文2改论文1/Find_3.py","file_name":"Find_3.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"} +{"seq_id":"229628894","text":"import tkinter as tk\nfrom tkinter import filedialog\n\nimport PIL\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport cv2\n#import moviepy.editor as mpe\nfrom moviepy.video.io.VideoFileClip import VideoFileClip\nfrom moviepy.audio.io.AudioFileClip import AudioFileClip\nimport numpy as np\nfrom scipy.stats import truncnorm\nimport tensorflow_hub as hub\nfrom tkinter.messagebox import showerror\nfrom threading import Thread\nfrom concurrent.futures import Future\nimport algorithms.MorphingLabels as MorphingLabels\nimport tarfile, requests\nimport os\nfrom PIL import ImageTk as itk\n\ntf.disable_v2_behavior()\n# Load compressed models from tensorflow_hub\nos.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'\n\n\ndef call_with_future(fn, future, args, kwargs):\n try:\n result = fn(*args, **kwargs)\n future.set_result(result)\n except Exception as exc:\n future.set_exception(exc)\n\ndef threaded(fn):\n def wrapper(*args, **kwargs):\n future = Future()\n Thread(target=call_with_future, args=(fn, future, args, kwargs)).start()\n return future\n return wrapper\n\nclass Morphing2(tk.Frame):\n num_samples = 1\n num_interps = 100\n truncation = 0.2\n noise_seed_A = 0\n noise_seed_B = 0\n\n def __init__(self, parent):\n tk.Frame.__init__(self, parent)\n top_padding = 50\n self.video = None\n self.label_select_styl = tk.Label(self, text='Choose images to create morphing: ', font=(\"TkDefaultFont\", 16))\n self.label_select_styl.place(x=60, y=top_padding-10, height=30, width=350)\n self.choices = MorphingLabels.get_labels()\n self.image_number_1 = tk.StringVar(self)\n self.image_number_1.set(self.choices[0])\n\n self.choose_box_param1 = tk.OptionMenu(self, self.image_number_1, *self.choices, command=self.option_changed)\n self.choose_box_param1.config(font=(\"TkDefaultFont\", 12))\n dropdown1 = self.nametowidget(self.choose_box_param1.menuname).config(font=(\"TkDefaultFont\", 12))\n self.choose_box_param1.place(x=460, y=top_padding + 20, height=40, width=400)\n\n 
self.image_number_2 = tk.StringVar(self)\n self.image_number_2.set(self.choices[0])\n self.choose_box_param2 = tk.OptionMenu(self, self.image_number_2, *self.choices, command=self.option_changed)\n self.choose_box_param2.config(font=(\"TkDefaultFont\", 12))\n dropdown2 = self.nametowidget(self.choose_box_param2.menuname).config(font=(\"TkDefaultFont\", 12))\n self.choose_box_param2.place(x=460, y=top_padding + 61, height=40, width=400)\n\n self.label1 = tk.Label(self, text='Choose first image:', font=44, background=\"lightgrey\").place(\n x=60, y=top_padding + 20, height=40, width=400)\n self.label2 = tk.Label(self, text='Choose second image:', font=44, background=\"lightgrey\").place(\n x=60, y=top_padding + 61, height=40, width=400)\n self.generate_button = tk.Button(self, text='Generate morphing', font=44, command=self.create_morphing).place(\n x=60, y=top_padding + 102, height=40, width=400)\n self.save_button = tk.Button(self, text='Save result', font=44, bg='green', command=self.save_morphing).place(\n x=460, y=top_padding + 102, height=40, width=400)\n self.label1 = tk.Label(self, text='PREVIEW', font=(\"TkDefaultFont\", 16)).place(x=410, y=220, height=40, width=100)\n self.preview_image1 = tk.Label(self, background=\"black\")\n self.preview_image1.place(x=137, y=280, height=256, width=256)\n self.preview_image2 = tk.Label(self, background=\"black\")\n self.preview_image2.place(x=537, y=280, height=256, width=256)\n\n self.option_changed()\n # Load the model\n self.load_model_h = self.loadModel()\n\n def option_changed(self, *args):\n image1_path = os.path.join(\"algorithms\", \"models\", \"morphing_imgs\", MorphingLabels.get_img(self.image_number_1.get()))\n image2_path = os.path.join(\"algorithms\", \"models\", \"morphing_imgs\", MorphingLabels.get_img(self.image_number_2.get()))\n img1 = itk.PhotoImage(image=PIL.Image.open(image1_path))\n img2 = itk.PhotoImage(image=PIL.Image.open(image2_path))\n self.preview_image1.config(image=img1)\n self.preview_image1.image = img1\n self.preview_image1.place(x=137, y=280, height=256, width=256)\n self.preview_image2.config(image=img2)\n self.preview_image2.image = img2\n self.preview_image2.place(x=537, y=280, height=256, width=256)\n\n @threaded\n def loadModel(self):\n tmp_path = os.path.join(\"algorithms\", \"models\", \"MORPHING\")\n file_name = os.path.join(\"algorithms\", \"models\", \"MORPHING\", \"tmp.tar.gz\")\n if not os.path.exists(tmp_path) or len(os.listdir(tmp_path)) == 0:\n if not os.path.exists(tmp_path):\n os.mkdir(tmp_path)\n url = 'https://tfhub.dev/deepmind/biggan-deep-128/1?tf-hub-format=compressed'\n print(\"Downloading BigGAN module\")\n r = requests.get(url, allow_redirects=True)\n open(file_name, 'wb').write(r.content)\n file = tarfile.open(file_name)\n print(\"Extracting BigGAN module\")\n file.extractall(tmp_path)\n file.close()\n os.remove(file_name)\n\n tf.reset_default_graph()\n tf.compat.v1.disable_eager_execution()\n print('Loading BigGAN module')\n self.module = hub.Module('algorithms/models/MORPHING')\n self.inputs = {k: tf.placeholder(v.dtype, v.get_shape().as_list(), k)\n for k, v in self.module.get_input_info_dict().items()}\n self.output = self.module(self.inputs)\n self.input_z = self.inputs['z']\n self.input_y = self.inputs['y']\n self.input_trunc = self.inputs['truncation']\n self.dim_z = self.input_z.shape.as_list()[1]\n self.vocab_size = self.input_y.shape.as_list()[1]\n initializer = tf.global_variables_initializer()\n self.sess = tf.Session()\n self.sess.run(initializer)\n print(\"Morphing is 
ready to go!!!\")\n\n def save_morphing(self):\n if self.video is None:\n tk.messagebox.showerror(title=\"Error\", message=\"There is nothing to save. Create morphing first.\")\n return\n\n filename = filedialog.asksaveasfile(initialdir=\"results\", mode='wb', defaultextension=\".mp4\",\n filetypes=((\"MP4\", \"*.mp4\"), (\"all files\", \"*.*\")))\n if not filename:\n return\n self.video.write_videofile(filename.name, fps=100)\n\n def truncated_z_sample(self, batch_size, truncation=1., seed=None):\n state = None if seed is None else np.random.RandomState(seed)\n values = truncnorm.rvs(-2, 2, size=(batch_size, self.dim_z), random_state=state)\n return truncation * values\n\n def one_hot(self, index, vocab_size):\n index = np.asarray(index)\n if len(index.shape) == 0:\n index = np.asarray([index])\n assert len(index.shape) == 1\n num = index.shape[0]\n output = np.zeros((num, vocab_size), dtype=np.float32)\n output[np.arange(num), index] = 1\n return output\n\n def one_hot_if_needed(self, label, vocab_size):\n label = np.asarray(label)\n if len(label.shape) <= 1:\n label = self.one_hot(label, vocab_size)\n assert len(label.shape) == 2\n return label\n\n # Function generating image from noise using GAN\n def sample(self, sess, noise, label, vocab_size, truncation=1., batch_size=8,):\n noise = np.asarray(noise)\n label = np.asarray(label)\n num = noise.shape[0]\n if len(label.shape) == 0:\n label = np.asarray([label] * num)\n if label.shape[0] != num:\n raise ValueError('Got # noise samples ({}) != # label samples ({})'\n .format(noise.shape[0], label.shape[0]))\n label = self.one_hot_if_needed(label, vocab_size)\n ims = []\n for batch_start in range(0, num, batch_size):\n s = slice(batch_start, min(num, batch_start + batch_size))\n feed_dict = {self.input_z: noise[s], self.input_y: label[s], self.input_trunc: truncation}\n ims.append(sess.run(self.output, feed_dict=feed_dict))\n ims = np.concatenate(ims, axis=0)\n assert ims.shape[0] == num\n ims = np.clip(((ims + 1) / 2.0) * 256, 0, 255)\n ims = np.uint8(ims)\n return ims\n\n # Basic interpolation function\n def interpolate(self, A, B, num_interps):\n if A.shape != B.shape:\n raise ValueError('A and B must have the same shape to interpolate.')\n alphas = np.linspace(0, 1, num_interps)\n return np.array([(1 - a) * A + a * B for a in alphas])\n\n # Function interpolating values from images (noises from which GAN generates images) to create mix of them\n def interpolate_and_shape(self, A, B, num_interps):\n interps = self.interpolate(A, B, num_interps)\n return (interps.transpose(1, 0, *range(2, len(interps.shape)))\n .reshape(self.num_samples * num_interps, *interps.shape[2:]))\n\n # Function creating video showing morphing between 2 photos generated by GAN\n def create_morphing(self):\n # Check if model is loaded\n self.load_model_h.result()\n category_A = MorphingLabels.get_value(self.image_number_1.get())\n category_B = MorphingLabels.get_value(self.image_number_2.get())\n z_A, z_B = [self.truncated_z_sample(self.num_samples, self.truncation, noise_seed)\n for noise_seed in [self.noise_seed_A, self.noise_seed_B]]\n y_A, y_B = [self.one_hot([category] * self.num_samples, self.vocab_size)\n for category in [category_A, category_B]]\n z_interp = self.interpolate_and_shape(z_A, z_B, self.num_interps)\n y_interp = self.interpolate_and_shape(y_A, y_B, self.num_interps)\n ims = self.sample(self.sess, z_interp, y_interp, self.vocab_size, truncation=self.truncation)\n video_name = 'video.avi'\n height, width, layers = ims[0].shape\n self.video = 
cv2.VideoWriter(video_name, 0, 50, (width, height))\n        for img in ims:\n            self.video.write(img)\n        cv2.destroyAllWindows()\n        self.video.release()\n        self.video = VideoFileClip(video_name)\n        os.startfile(\"video.avi\")","sub_path":"algorithms/Morphing2.py","file_name":"Morphing2.py","file_ext":"py","file_size_in_byte":10477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}{"seq_id":"173808764","text":"def find_gcd(x, y):\r\n    while(y):\r\n        x, y = y, x%y\r\n    return x\r\nN = int(input())\r\nA = list(map(int, input().split()))\r\nnum1 = A[0]\r\nnum2 = A[1]\r\ngcd = find_gcd(num1, num2)\r\nfor i in range(2, len(A)):\r\n    gcd = find_gcd(gcd, A[i])\r\nprint(gcd)\r\n","sub_path":"WEEK6 [02-May-21]/Rakuten - Stack It.py","file_name":"Rakuten - Stack It.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}{"seq_id":"558388664","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport djbetty.fields\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('testcontent', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='testcontentdetailimage',\n            name='detail_image',\n            field=djbetty.fields.ImageField(null=True, blank=True, caption_field='detail_caption', alt_field='detail_alt', default=None),\n        ),\n    ]\n","sub_path":"example/testcontent/migrations/0002_auto_20151103_1402.py","file_name":"0002_auto_20151103_1402.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}{"seq_id":"141688254","text":"#############################################################\r\n# FILE : NonRecursiveMystery.py\r\n# WRITER : Leshem Choshen + borgr + 305385338\r\n# EXERCISE : intro2cs ex5 200132014\r\n# DESCRIPTION:\r\n# An arithmetic function that:\r\n# gets a number and\r\n# returns the sum of its proper divisors (itself excluded, 1 included).\r\n#############################################################\r\ndef mystery_computation(number):\r\n    \"\"\"An arithmetic function that:\r\n    gets a number and\r\n    returns the sum of its proper divisors (itself excluded, 1 included).\r\n    \"\"\"\r\n    sm_modu = 0\r\n    for i in range(1, number):\r\n        if number % i == 0:\r\n            sm_modu += i\r\n    return sm_modu\r\n","sub_path":"ex05/NonRecursiveMystery.py.py","file_name":"NonRecursiveMystery.py.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}{"seq_id":"489029960","text":"import threading\n\nimport telebot, schedule, sqlite3, time, datetime\nfrom multiprocessing import *\nbot = telebot.TeleBot(\"1680703308:AAFYj5I9_ZpvVpSf2nZrTO3W3ovt49UyAIc\")\npapa_id = '1062973400'\nchannel_name = '-1001497838043'\npost_time = '17:00'\n\n\ndef run_threaded(job_func):\n    job_thread = threading.Thread(target=job_func)\n    job_thread.start()\n\n\nclass P_schedule():\n    def start_schedule():\n        schedule.every().tuesday.at(post_time).do(run_threaded, public_post)\n        schedule.every().thursday.at(post_time).do(run_threaded, public_post)\n        schedule.every().saturday.at(post_time).do(run_threaded, public_post)\n\n        while True:\n            schedule.run_pending()\n            time.sleep(1)\n\n\ndef start_process():\n    p1 = Process(target=P_schedule.start_schedule, args=()).start()\n\n\ndef public_post():\n    try:\n        db = sqlite3.connect(\"database.db\")\n        cursor = db.cursor()\n        cursor.execute(\"SELECT posts FROM 
datas\")\n post = cursor.fetchone()[0]\n # bot.send_message(channel_name, post)\n bot.send_message(papa_id, f'Новый пост был опубликован. Постов в очереди: {len(cursor.fetchall())}')\n cursor.execute(f\"DELETE FROM datas WHERE posts='{post}'\")\n db.commit()\n except:\n bot.send_message(papa_id, f'Посты в очереди закончились. Ничего не опубликованно(')\n\n\n@bot.message_handler(content_types='text')\ndef post(message):\n db = sqlite3.connect(\"database.db\")\n cursor = db.cursor()\n if message.text == '/order':\n cursor.execute(\"SELECT id, posts FROM datas\")\n res = cursor.fetchall()\n bot.send_message(message.chat.id, '\\n'.join([str(post[0]) + \". \" + post[1] for post in res])\n if len(res) > 0 else \"В очереди нет постов.\")\n elif message.text.split()[0] == '/delete':\n try:\n cursor.execute(f\"DELETE FROM datas WHERE id={message.text.split()[1]}\")\n db.commit()\n bot.send_message(message.chat.id, \"Пост успешно удалён!\")\n except:\n bot.send_message(message.chat.id, \"Ошибка!\")\n else:\n cursor.execute(\"INSERT INTO datas(posts) VALUES(?)\", (message.text,))\n db.commit()\n bot.send_message(message.chat.id, \"Пост добавлен в очередь!\")\n\n\nif __name__ == '__main__':\n start_process()\n try:\n bot.polling(none_stop=True)\n except:\n pass\n","sub_path":"PodruchnyjBot.py","file_name":"PodruchnyjBot.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"16"}